def ck_portf_004(self):
    '''Price has traded sideways for about a month and a half (box-range consolidation).'''
    return self.a.SD < 0.25 and self.a.stock_vol[-1] > 1000 * 1000 and self.a.raw_data[-1] > 10
def __modify(self, subscription_id, **kwargs):
    """Call documentation: `/subscription/modify
    <https://www.wepay.com/developer/reference/subscription#modify>`_, plus
    extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
       ``access_token``, with ``batch_mode=True`` will set `authorization`
       param to its value.

    :keyword bool batch_mode: turn on/off the batch_mode, see
       :class:`wepay.api.WePay`

    :keyword str batch_reference_id: `reference_id` param for batch call,
       see :class:`wepay.api.WePay`

    :keyword str api_version: WePay API version, see
       :class:`wepay.api.WePay`

    """
    params = {
        'subscription_id': subscription_id
    }
    return self.make_call(self.__modify, params, kwargs)
def from_mult_iters(cls, name=None, idx=None, **kwargs):
    """Load values from multiple iters

    Parameters
    ----------
    name : string, default None
        Name of the data set. If None (default), the name will be set to
        ``'table'``.
    idx : string, default None
        Iterable to use for the data index
    **kwargs : dict of iterables
        The ``values`` field will contain dictionaries with keys for
        each of the iterables provided. For example,

            d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))

        would result in ``d`` having a ``values`` field with

            [{'idx': 0, 'col': 'y', 'val': 10},
             {'idx': 1, 'col': 'y', 'val': 20},
             {'idx': 5, 'col': 'y', 'val': 30}]

        If the iterables are not the same length, then ValueError is raised.
    """
    if not name:
        name = 'table'

    lengths = [len(v) for v in kwargs.values()]

    if len(set(lengths)) != 1:
        raise ValueError('Iterables must all be same length')

    if not idx:
        raise ValueError('Must provide iter name index reference')

    index = kwargs.pop(idx)
    vega_vals = []
    for k, v in sorted(kwargs.items()):
        for idx, val in zip(index, v):
            value = {}
            value['idx'] = idx
            value['col'] = k
            value['val'] = val
            vega_vals.append(value)

    return cls(name, values=vega_vals)
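A minimal standalone sketch of the same flattening transform, useful for verifying the expected output shape without constructing a Data object (names mirror the function body above):

index = [0, 1, 5]
vega_vals = []
for k, v in sorted({'y': (10, 20, 30)}.items()):
    for i, val in zip(index, v):
        vega_vals.append({'idx': i, 'col': k, 'val': val})
print(vega_vals)
# [{'idx': 0, 'col': 'y', 'val': 10}, {'idx': 1, 'col': 'y', 'val': 20},
#  {'idx': 5, 'col': 'y', 'val': 30}]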
def source(self):
    """Parameters for saving zip backups"""
    with gui.FlexForm(self.title, auto_size_text=True,
                      default_element_size=(40, 1)) as form:
        layout = [
            [gui.Text('Zip Backup utility', size=(30, 1),
                      font=("Helvetica", 30), text_color='blue')],
            [gui.Text('Create a zip backup of a file or directory.',
                      size=(50, 1), font=("Helvetica", 18),
                      text_color='black')],
            [gui.Text('-' * 200)],
            # Source
            [gui.Text('Select source folder', size=(20, 1),
                      font=("Helvetica", 25), auto_size_text=False),
             gui.InputText('', key='source', font=("Helvetica", 20)),
             gui.FolderBrowse()],
            [gui.Submit(), gui.Cancel()]]
        button, values = form.LayoutAndRead(layout)
    if button == 'Submit':
        return values['source']
    else:
        exit()
def dumps(obj, indent=None, default=None, sort_keys=False, **kw):
    """Dump string."""
    return YAMLEncoder(indent=indent, default=default,
                       sort_keys=sort_keys, **kw).encode(obj)
def process_vlen(data_header, array):
    """Process vlen coming back from NCStream v2.

    This takes the array of values and slices into an object array, with
    entries containing the appropriate pieces of the original array. Sizes
    are controlled by the passed in `data_header`.

    Parameters
    ----------
    data_header : Header
    array : :class:`numpy.ndarray`

    Returns
    -------
    ndarray
        object array containing sub-sequences from the original primitive
        array
    """
    source = iter(array)
    return np.array([np.fromiter(itertools.islice(source, size), dtype=array.dtype)
                     for size in data_header.vlens])
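The core slicing trick is a plain itertools pattern and can be checked in isolation; the vlens list below stands in for a real NCStream data header:

import itertools
import numpy as np

flat = np.array([1, 2, 3, 4, 5, 6])
vlens = [2, 3, 1]  # stand-in for data_header.vlens
source = iter(flat)
pieces = [np.fromiter(itertools.islice(source, n), dtype=flat.dtype) for n in vlens]
print(pieces)  # [array([1, 2]), array([3, 4, 5]), array([6])]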
def add_build_tags(self, tags, project, build_id):
    """AddBuildTags.
    Adds tags to a build.
    :param [str] tags: The tags to add.
    :param str project: Project ID or project name
    :param int build_id: The ID of the build.
    :rtype: [str]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if build_id is not None:
        route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
    content = self._serialize.body(tags, '[str]')
    response = self._send(http_method='POST',
                          location_id='6e6114b2-8161-44c8-8f6c-c5505782427f',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('[str]', self._unwrap_collection(response))
def process(self, endpoint, **kwargs):
    """
    Polls the data from prometheus and pushes them as gauges.
    `endpoint` is the metrics endpoint to use to poll metrics from Prometheus.

    Note that if the instance has a 'tags' attribute, it will be pushed
    automatically as additional custom tags and added to the metrics.
    """
    instance = kwargs.get('instance')
    if instance:
        kwargs['custom_tags'] = instance.get('tags', [])
    for metric in self.scrape_metrics(endpoint):
        self.process_metric(metric, **kwargs)
def get_bookmarks(self):
    """
    Get the stored bookmarks from the server. Causes signals to be fired
    to reflect the changes.

    :returns: a list of bookmarks
    """
    with (yield from self._lock):
        bookmarks = yield from self._get_bookmarks()
        self._diff_emit_update(bookmarks)
        return bookmarks
def alter_database_admin(self, username, is_admin):
    """Alter the database admin."""
    url = "db/{0}/users/{1}".format(self._database, username)
    data = {'admin': is_admin}
    self.request(
        url=url,
        method='POST',
        data=data,
        expected_response_code=200
    )
    return True
def add_comment(self, comment):
    """
    Add a comment to the database.

    Args:
        comment (hotdoc.core.Comment): comment to add
    """
    if not comment:
        return
    self.__comments[comment.name] = comment
    self.comment_added_signal(self, comment)
def init_host(self):
    """Initialize the host connection settings."""
    env.host_string = self.host_string
    env.user = self.host_user
    env.password = self.host_passwd
    env.key_filename = self.host_keyfile
def get_items_by_ids(self, item_ids, item_type=None):
    """Given a list of item ids, return all the Item objects

    Args:
        item_ids (list): List of item IDs to query
        item_type (str): (optional) Item type to filter results with

    Returns:
        List of `Item` objects for given item IDs and given item type
    """
    urls = [urljoin(self.item_url, f"{i}.json") for i in item_ids]
    result = self._run_async(urls=urls)
    items = [Item(r) for r in result if r]
    if item_type:
        return [item for item in items if item.item_type == item_type]
    else:
        return items
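A quick look at how the URL list comes out; the base URL here is a hypothetical Hacker-News-style endpoint, not taken from the source:

from urllib.parse import urljoin

item_url = "https://hacker-news.firebaseio.com/v0/item/"  # hypothetical base
urls = [urljoin(item_url, f"{i}.json") for i in (8863, 8864)]
print(urls)
# ['https://hacker-news.firebaseio.com/v0/item/8863.json',
#  'https://hacker-news.firebaseio.com/v0/item/8864.json']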
def list2pd(all_data, subjindex=None, listindex=None):
    """
    Makes multi-indexed dataframe of subject data

    Parameters
    ----------
    all_data : list of lists of strings
        strings are either all presented or all recalled items, in the
        order of presentation or recall
        *should also work for presented / recalled ints and floats, if desired

    Returns
    -------
    subs_list_of_dfs : multi-indexed dataframe
        dataframe of subject data (presented or recalled words/items),
        indexed by subject and list number; cell populated by the term
        presented or recalled in the position indicated by the column number
    """
    # set default index if it is not defined
    listindex = [[idx for idx in range(len(sub))] for sub in all_data] if not listindex else listindex
    subjindex = [idx for idx, subj in enumerate(all_data)] if not subjindex else subjindex

    def make_multi_index(listindex, sub_num):
        return pd.MultiIndex.from_tuples([(sub_num, lst) for lst in listindex],
                                         names=['Subject', 'List'])

    listindex = list(listindex)
    subjindex = list(subjindex)

    subs_list_of_dfs = [
        pd.DataFrame(sub_data, index=make_multi_index(listindex[sub_num], subjindex[sub_num]))
        for sub_num, sub_data in enumerate(all_data)]

    return pd.concat(subs_list_of_dfs)
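A toy check of the resulting index, assuming pandas is importable as pd as in the function body (one subject, two presented lists of two items each):

import pandas as pd

all_data = [[['cat', 'dog'], ['fish', 'bird']]]
df = list2pd(all_data)
print(df.index.tolist())  # [(0, 0), (0, 1)]
print(df.loc[(0, 1), 0])  # fish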
def find_all(self, cls):
    """Required functionality."""
    final_results = []
    table = self.get_class_table(cls)
    for db_result in table.scan():
        obj = cls.from_data(db_result['value'])
        final_results.append(obj)
    return final_results
def count_SMS(self, conditions={}):
    """
    Count all certified sms
    """
    url = self.SMS_COUNT_URL + "?"
    for key, value in conditions.items():
        # use equality, not identity, when comparing strings
        if key == 'ids':
            value = ",".join(value)
        url += '&%s=%s' % (key, value)
    connection = Connection(self.token)
    connection.set_url(self.production, url)
    return connection.get_request()
def tag_subcat_info(mrf_lines, subcat_rules):
    '''Adds subcategorization information (hashtags) to verbs and adpositions;

    Argument subcat_rules must be a dict containing subcategorization
    information, loaded via method load_subcat_info();

    Performs word lemma lookups in subcat_rules, and in case of a match,
    checks word part-of-speech conditions. If the POS conditions match,
    adds subcategorization information either to a single analysis line,
    or to multiple analysis lines (depending on the exact conditions in
    the rule);

    Returns the input list where verb/adposition analyses have been
    augmented with available subcategorization information;
    '''
    i = 0
    while i < len(mrf_lines):
        line = mrf_lines[i]
        if line.startswith(' '):
            lemma_match = analysisLemmaPat.match(line)
            if lemma_match:
                lemma = lemma_match.group(1)
                # Find whether there is subcategorization info associated
                # with the lemma
                if lemma in subcat_rules:
                    analysis_match = analysisPat.search(line)
                    if not analysis_match:
                        raise Exception(' Could not find analysis from the line:', line)
                    analysis = analysis_match.group(1)
                    for rule in subcat_rules[lemma]:
                        condition, addition = rule.split('>')
                        # Check the condition string; if there are multiple
                        # conditions, all must be satisfied for the rule to fire
                        condition = condition.strip()
                        conditions = condition.split()
                        satisfied1 = [_check_condition(c, analysis) for c in conditions]
                        if all(satisfied1):
                            # There can be multiple additions:
                            #   1) additions without '|' must be added to a
                            #      single analysis line;
                            #   2) additions separated by '|' must be placed
                            #      on separate analysis lines;
                            additions = addition.split('|')
                            j = i
                            # Add new line or lines
                            for a in additions:
                                line_copy = line if i == j else line[:]
                                items_to_add = a.split()
                                for item in items_to_add:
                                    if not _check_condition(item, analysis):
                                        line_copy = \
                                            re.sub(r'(//.+\S)\s+//', '\\1 ' + item + ' //', line_copy)
                                if j == i:
                                    # 1) replace the existing line
                                    mrf_lines[i] = line_copy
                                else:
                                    # 2) add a new line
                                    mrf_lines.insert(i, line_copy)
                                j += 1
                            i = j - 1
                            # No need to search forward
                            break
        i += 1
    return mrf_lines
def parse_declaration_expressn_memberaccess(self, lhsAST, rhsAST, es):
    """
    Instead of "Class.variablename", use "Class.rv('variablename')".

    :param lhsAST:
    :param rhsAST:
    :param es:
    :return:
    """
    if isinstance(lhsAST, wdl_parser.Terminal):
        es = es + lhsAST.source_string
    elif isinstance(lhsAST, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(lhsAST, wdl_parser.AstList):
        raise NotImplementedError

    es = es + '_'

    if isinstance(rhsAST, wdl_parser.Terminal):
        es = es + rhsAST.source_string
    elif isinstance(rhsAST, wdl_parser.Ast):
        raise NotImplementedError
    elif isinstance(rhsAST, wdl_parser.AstList):
        raise NotImplementedError

    return es
def supported_types_for_non_geo_entity(country_code):
    """Returns the types for a country-code belonging to a non-geographical
    entity which the library has metadata for. Will not include
    FIXED_LINE_OR_MOBILE (if numbers for this non-geographical entity could
    be classified as FIXED_LINE_OR_MOBILE, both FIXED_LINE and MOBILE would
    be present) and UNKNOWN.

    No types will be returned for country calling codes that do not map to
    a known non-geographical entity.
    """
    metadata = PhoneMetadata.metadata_for_nongeo_region(country_code, None)
    if metadata is None:
        return set()
    return _supported_types_for_metadata(metadata)
def merge_adjacent(dom, tag_name):
    """
    Merge all adjacent tags with the specified tag name.
    Return the number of merges performed.
    """
    # Track merges so the count promised by the docstring is returned.
    num_merges = 0
    for node in dom.getElementsByTagName(tag_name):
        prev_sib = node.previousSibling
        if prev_sib and prev_sib.nodeName == node.tagName:
            for child in list(node.childNodes):
                prev_sib.appendChild(child)
            remove_node(node)
            num_merges += 1
    return num_merges
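A standalone sketch with xml.dom.minidom; remove_node is a helper from the surrounding module, so a minimal stand-in is defined here for the demo:

from xml.dom.minidom import parseString

def remove_node(node):  # stand-in for the module helper
    node.parentNode.removeChild(node)

dom = parseString('<p><b>one</b><b>two</b></p>')
print(merge_adjacent(dom, 'b'))     # 1
print(dom.documentElement.toxml())  # <p><b>onetwo</b></p>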
def render_files(self, root=None):
    """Render the file path as accordions"""
    if root is None:
        tmp = os.environ.get('TMP')
        root = sys.path[1 if tmp and tmp in sys.path else 0]
    items = []
    for filename in os.listdir(root):
        # for subdirname in dirnames:
        #     path = os.path.join(dirname, subdirname)
        #     items.append(FOLDER_TMPL.format(
        #         name=subdirname,
        #         id=path,
        #         items=self.render_files(path)
        #     ))
        # for filename in filenames:
        f, ext = os.path.splitext(filename)
        if ext in ['.py', '.enaml']:
            items.append(FILE_TMPL.format(
                name=filename,
                id=filename
            ))
    return "".join(items)
def get_machine_group_applied_configs(self, project_name, group_name):
    """ get the logtail config names applied in a machine group
    Unsuccessful operation will cause an LogException.

    :type project_name: string
    :param project_name: the Project name

    :type group_name: string
    :param group_name: the group name

    :return: GetMachineGroupAppliedConfigResponse

    :raise: LogException
    """
    headers = {}
    params = {}
    resource = "/machinegroups/" + group_name + "/configs"
    (resp, header) = self._send("GET", project_name, None, resource, params, headers)
    return GetMachineGroupAppliedConfigResponse(resp, header)
def device_connect(device_id):
    """Force a connection attempt via HTTP GET."""
    success = False
    if device_id in devices:
        devices[device_id].connect()
        success = True
    return jsonify(success=success)
def proportion_merge(brands, exemplars):
    """Return the proportion of a brand's followers who also follow an
    exemplar. We merge all exemplar followers into one big pseudo-account."""
    scores = {}
    exemplar_followers = set()
    for followers in exemplars.values():
        exemplar_followers |= followers
    for brand, followers in brands:
        scores[brand] = _proportion(followers, exemplar_followers)
    return scores
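A toy run; _proportion is internal to the module, so the demo assumes it is the share of a brand's followers found in the merged exemplar set:

def _proportion(followers, exemplar_followers):  # assumed implementation
    return len(followers & exemplar_followers) / len(followers)

brands = [('acme', {1, 2, 3, 4})]
exemplars = {'ex1': {1, 2}, 'ex2': {3}}
print(proportion_merge(brands, exemplars))  # {'acme': 0.75}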
def get_analysis_question(hazard, exposure):
    """Construct analysis question based on hazard and exposure.

    :param hazard: A hazard definition.
    :type hazard: dict

    :param exposure: An exposure definition.
    :type exposure: dict

    :returns: Analysis question based on reporting standards.
    :rtype: str
    """
    # First we look for a translated hardcoded question.
    question = specific_analysis_question(hazard, exposure)
    if question:
        return question

    if hazard == hazard_generic:
        # Secondly, if the hazard is generic, we don't need the hazard name.
        question = tr(
            'In each of the hazard zones {exposure_measure} {exposure_name} '
            'might be affected?').format(
                exposure_measure=exposure['measure_question'],
                exposure_name=exposure['name'])
        return question

    # Then, we fall back on a string generated on the fly.
    question = tr(
        'In the event of a {hazard_name}, {exposure_measure} {exposure_name} '
        'might be affected?').format(
            hazard_name=hazard['name'],
            exposure_measure=exposure['measure_question'],
            exposure_name=exposure['name'])
    return question
def parse(yaml, validate=True):
    """
    Parse the given YAML data into a `Config` object, optionally validating
    it first.

    :param yaml: YAML data (either a string, a stream, or pre-parsed
                 Python dict/list)
    :type yaml: list|dict|str|file
    :param validate: Whether to validate the data before attempting to
                     parse it.
    :type validate: bool
    :return: Config object
    :rtype: valohai_yaml.objs.Config
    """
    data = read_yaml(yaml)
    if validate:  # pragma: no branch
        from .validation import validate
        validate(data, raise_exc=True)
    return Config.parse(data)
def input_value(self, locator, text):
    """Sets the given value into the text field identified by `locator`.

    This is an iOS-only keyword; Input Value makes use of set_value.

    See `introduction` for details about locating elements.
    """
    self._info("Setting text '%s' into text field '%s'" % (text, locator))
    self._element_input_value_by_locator(locator, text)
def evaluate_binop_math(self, operation, left, right, **kwargs):
    """
    Evaluate given mathematical binary operation with given operands.
    """
    if operation not in self.binops_math:
        raise ValueError("Invalid math binary operation '{}'".format(operation))
    if left is None or right is None:
        return None
    if not isinstance(left, (list, ListIP)):
        left = [left]
    if not isinstance(right, (list, ListIP)):
        right = [right]
    if not left or not right:
        return None
    try:
        vect = self._calculate_vector(operation, left, right)
        if len(vect) > 1:
            return vect
        return vect[0]
    except Exception:
        return None
def code(ctx, show_hidden, query, single):
    """
    Generate codes.

    Generate codes from credentials stored on your YubiKey.
    Provide a query string to match one or more specific credentials.
    Touch and HOTP credentials require a single match to be triggered.
    """
    ensure_validated(ctx)

    controller = ctx.obj['controller']
    creds = [(cr, c)
             for (cr, c) in controller.calculate_all()
             if show_hidden or not cr.is_hidden]

    creds = _search(creds, query)

    if len(creds) == 1:
        cred, code = creds[0]
        if cred.touch:
            prompt_for_touch()
        try:
            if cred.oath_type == OATH_TYPE.HOTP:
                # HOTP might require touch, we don't know.
                # Assume yes after 500ms.
                hotp_touch_timer = Timer(0.500, prompt_for_touch)
                hotp_touch_timer.start()
                creds = [(cred, controller.calculate(cred))]
                hotp_touch_timer.cancel()
            elif code is None:
                creds = [(cred, controller.calculate(cred))]
        except APDUError as e:
            if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
                ctx.fail('Touch credential timed out!')
    elif single:
        _error_multiple_hits(ctx, [cr for cr, c in creds])

    if single:
        click.echo(creds[0][1].value)
    else:
        creds.sort()

        outputs = [
            (
                cr.printable_key,
                c.value if c
                else '[Touch Credential]' if cr.touch
                else '[HOTP Credential]' if cr.oath_type == OATH_TYPE.HOTP
                else ''
            ) for (cr, c) in creds
        ]

        longest_name = max(len(n) for (n, c) in outputs) if outputs else 0
        longest_code = max(len(c) for (n, c) in outputs) if outputs else 0
        format_str = u'{:<%d} {:>%d}' % (longest_name, longest_code)

        for name, result in outputs:
            click.echo(format_str.format(name, result))
def update_course_settings(self, course_id,
                           allow_student_discussion_editing=None,
                           allow_student_discussion_topics=None,
                           allow_student_forum_attachments=None,
                           allow_student_organized_groups=None,
                           hide_distribution_graphs=None,
                           hide_final_grades=None,
                           home_page_announcement_limit=None,
                           lock_all_announcements=None,
                           restrict_student_future_view=None,
                           restrict_student_past_view=None,
                           show_announcements_on_home_page=None):
    """
    Update course settings.

    Can update the following course settings:
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # OPTIONAL - allow_student_discussion_topics
    """Let students create discussion topics"""
    if allow_student_discussion_topics is not None:
        data["allow_student_discussion_topics"] = allow_student_discussion_topics

    # OPTIONAL - allow_student_forum_attachments
    """Let students attach files to discussions"""
    if allow_student_forum_attachments is not None:
        data["allow_student_forum_attachments"] = allow_student_forum_attachments

    # OPTIONAL - allow_student_discussion_editing
    """Let students edit or delete their own discussion posts"""
    if allow_student_discussion_editing is not None:
        data["allow_student_discussion_editing"] = allow_student_discussion_editing

    # OPTIONAL - allow_student_organized_groups
    """Let students organize their own groups"""
    if allow_student_organized_groups is not None:
        data["allow_student_organized_groups"] = allow_student_organized_groups

    # OPTIONAL - hide_final_grades
    """Hide totals in student grades summary"""
    if hide_final_grades is not None:
        data["hide_final_grades"] = hide_final_grades

    # OPTIONAL - hide_distribution_graphs
    """Hide grade distribution graphs from students"""
    if hide_distribution_graphs is not None:
        data["hide_distribution_graphs"] = hide_distribution_graphs

    # OPTIONAL - lock_all_announcements
    """Disable comments on announcements"""
    if lock_all_announcements is not None:
        data["lock_all_announcements"] = lock_all_announcements

    # OPTIONAL - restrict_student_past_view
    """Restrict students from viewing courses after end date"""
    if restrict_student_past_view is not None:
        data["restrict_student_past_view"] = restrict_student_past_view

    # OPTIONAL - restrict_student_future_view
    """Restrict students from viewing courses before start date"""
    if restrict_student_future_view is not None:
        data["restrict_student_future_view"] = restrict_student_future_view

    # OPTIONAL - show_announcements_on_home_page
    """Show the most recent announcements on the Course home page (if a Wiki,
    defaults to five announcements, configurable via
    home_page_announcement_limit)"""
    if show_announcements_on_home_page is not None:
        data["show_announcements_on_home_page"] = show_announcements_on_home_page

    # OPTIONAL - home_page_announcement_limit
    """Limit the number of announcements on the home page if enabled via
    show_announcements_on_home_page"""
    if home_page_announcement_limit is not None:
        data["home_page_announcement_limit"] = home_page_announcement_limit

    self.logger.debug("PUT /api/v1/courses/{course_id}/settings with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/courses/{course_id}/settings".format(**path), data=data, params=params, no_data=True)
def download(cls, url, filename=None):
    """
    Download a file into the correct cache directory.
    """
    return utility.download(url, cls.directory(), filename)
def process(self, salt_data, token, opts):
    '''
    Process events and publish data
    '''
    parts = salt_data['tag'].split('/')
    if len(parts) < 2:
        return

    # TBD: Simplify these conditional expressions
    if parts[1] == 'job':
        if parts[3] == 'new':
            self.process_new_job_event(salt_data)
            if salt_data['data']['fun'] == 'grains.items':
                self.minions = {}
        elif parts[3] == 'ret':
            self.process_ret_job_event(salt_data)
            if salt_data['data']['fun'] == 'grains.items':
                self.process_minion_update(salt_data)
    if parts[1] == 'key':
        self.process_key_event(salt_data)
    if parts[1] == 'presence':
        self.process_presence_events(salt_data, token, opts)
def _check_team_exists(team):
    """
    Check that the team registry actually exists.
    """
    if team is None:
        return

    hostname = urlparse(get_registry_url(team)).hostname
    try:
        socket.gethostbyname(hostname)
    except IOError:
        try:
            # Do we have internet?
            socket.gethostbyname('quiltdata.com')
        except IOError:
            message = "Can't find quiltdata.com. Check your internet connection."
        else:
            message = "Unable to connect to registry. Is the team name %r correct?" % team
        raise CommandException(message)
def roundrobin(*iterables):
    """roundrobin('ABC', 'D', 'EF') --> A D E B F C"""
    # This is the standard itertools "roundrobin" recipe; see also
    # http://stackoverflow.com/questions/11125212/interleaving-lists-in-python
    pending = len(iterables)
    if six.PY2:
        nexts = cycle(iter(it).next for it in iterables)
    else:
        nexts = cycle(iter(it).__next__ for it in iterables)
    while pending:
        try:
            for nxt in nexts:
                yield nxt()
        except StopIteration:
            pending -= 1
            nexts = cycle(islice(nexts, pending))
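Usage check of the recipe (assumes six and the itertools cycle/islice imports are available in the module):

print(''.join(roundrobin('ABC', 'D', 'EF')))  # ADEBFC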
def limitsSql(startIndex=0, maxResults=0):
    """
    Construct a SQL LIMIT clause
    """
    if startIndex and maxResults:
        return " LIMIT {}, {}".format(startIndex, maxResults)
    elif startIndex:
        raise Exception("startIndex was provided, but maxResults was not")
    elif maxResults:
        return " LIMIT {}".format(maxResults)
    else:
        return ""
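Usage sketch covering the three paths:

print(repr(limitsSql(10, 5)))         # ' LIMIT 10, 5'
print(repr(limitsSql(maxResults=5)))  # ' LIMIT 5'
print(repr(limitsSql()))              # ''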
def all_characters(self):
    '''
    Returns a queryset of all characters associated with this node and its
    descendants, excluding any duplicates.
    '''
    qs = self.assoc_characters.all()
    for node in self.get_descendants():
        qs2 = node.assoc_characters.all()
        qs = qs.union(qs2).distinct('pk')
    return qs
def _build_memory_regions(self):
    """! @brief Creates memory region instances for the device.

    For each `<memory>` element in the device info, a memory region object
    is created and added to the `_regions` attribute. IROM or non-writable
    memories are created as RomRegions by this method. They will be
    converted to FlashRegions by _build_flash_regions().
    """
    for elem in self._info.memories:
        try:
            # Get the region name, type, and access permissions.
            if 'name' in elem.attrib:
                name = elem.attrib['name']
                access = elem.attrib['access']

                if ('p' in access):
                    type = MemoryType.DEVICE
                elif ('w' in access):
                    type = MemoryType.RAM
                else:
                    type = MemoryType.ROM
            elif 'id' in elem.attrib:
                name = elem.attrib['id']

                if 'RAM' in name:
                    access = 'rwx'
                    type = MemoryType.RAM
                else:
                    access = 'rx'
                    type = MemoryType.ROM
            else:
                continue

            # Both start and size are required attributes.
            start = int(elem.attrib['start'], base=0)
            size = int(elem.attrib['size'], base=0)

            isDefault = _get_bool_attribute(elem, 'default')
            isStartup = _get_bool_attribute(elem, 'startup')
            if isStartup:
                self._saw_startup = True

            attrs = {
                'name': name,
                'start': start,
                'length': size,
                'access': access,
                'is_default': isDefault,
                'is_boot_memory': isStartup,
                'is_testable': isDefault,
                'alias': elem.attrib.get('alias', None),
            }

            # Create the memory region and add to map.
            region = MEMORY_TYPE_CLASS_MAP[type](**attrs)
            self._regions.append(region)

            # Record the first default ram for use in flash algos.
            if self._default_ram is None and type == MemoryType.RAM and isDefault:
                self._default_ram = region
        except (KeyError, ValueError) as err:
            # Ignore errors.
            LOG.debug("ignoring error parsing memories for CMSIS-Pack device %s: %s",
                      self.part_number, str(err))
def parse(self, xml_file):
    "Get a list of parsed recipes from BeerXML input"
    recipes = []

    with open(xml_file, "rt") as f:
        tree = ElementTree.parse(f)

    for recipeNode in tree.iter():
        if self.to_lower(recipeNode.tag) != "recipe":
            continue

        recipe = Recipe()
        recipes.append(recipe)

        for recipeProperty in list(recipeNode):
            tag_name = self.to_lower(recipeProperty.tag)

            if tag_name == "fermentables":
                for fermentable_node in list(recipeProperty):
                    fermentable = Fermentable()
                    self.nodes_to_object(fermentable_node, fermentable)
                    recipe.fermentables.append(fermentable)
            elif tag_name == "yeasts":
                for yeast_node in list(recipeProperty):
                    yeast = Yeast()
                    self.nodes_to_object(yeast_node, yeast)
                    recipe.yeasts.append(yeast)
            elif tag_name == "hops":
                for hop_node in list(recipeProperty):
                    hop = Hop()
                    self.nodes_to_object(hop_node, hop)
                    recipe.hops.append(hop)
            elif tag_name == "miscs":
                for misc_node in list(recipeProperty):
                    misc = Misc()
                    self.nodes_to_object(misc_node, misc)
                    recipe.miscs.append(misc)
            elif tag_name == "style":
                style = Style()
                recipe.style = style
                self.nodes_to_object(recipeProperty, style)
            elif tag_name == "mash":
                # Create the Mash once, rather than once per child node,
                # so the mash steps accumulate on a single object.
                mash = Mash()
                recipe.mash = mash
                for mash_node in list(recipeProperty):
                    if self.to_lower(mash_node.tag) == "mash_steps":
                        for mash_step_node in list(mash_node):
                            mash_step = MashStep()
                            self.nodes_to_object(mash_step_node, mash_step)
                            mash.steps.append(mash_step)
                    else:
                        self.nodes_to_object(mash_node, mash)
            else:
                self.node_to_object(recipeProperty, recipe)

    return recipes
def connect(self):
    """
    Connects to the Responsys soap service

    Uses the credentials passed to the client init to login and set up
    the session id returned.

    Returns a truthy connection timestamp on success; raises on login
    failure.
    """
    if self.session and self.session.is_expired:
        # Close the session to avoid max concurrent session errors
        self.disconnect(abandon_session=True)

    if not self.session:
        try:
            login_result = self.login(self.username, self.password)
        except AccountFault:
            log.error('Login failed, invalid username or password')
            raise
        else:
            self.session = login_result.session_id

    self.connected = time()
    return self.connected
def QA_save_tdx_to_mongo(file_dir, client=DATABASE):
    """Save TDX 5-minute bar files into MongoDB.

    Arguments:
        file_dir {str} -- directory containing the data files

    Keyword Arguments:
        client {Mongodb:Connection} -- Mongo Connection (default: {DATABASE})
    """
    reader = TdxMinBarReader()
    __coll = client.stock_min_five
    for a, v, files in os.walk(file_dir):
        for file in files:
            if (str(file)[0:2] == 'sh' and int(str(file)[2]) == 6) or \
               (str(file)[0:2] == 'sz' and int(str(file)[2]) == 0) or \
               (str(file)[0:2] == 'sz' and int(str(file)[2]) == 3):
                QA_util_log_info('Now_saving ' + str(file)[2:8] + '\'s 5 min tick')
                fname = file_dir + os.sep + file
                df = reader.get_df(fname)
                df['code'] = str(file)[2:8]
                df['market'] = str(file)[0:2]
                df['datetime'] = [str(x) for x in list(df.index)]
                df['date'] = [str(x)[0:10] for x in list(df.index)]
                df['time_stamp'] = df['datetime'].apply(
                    lambda x: QA_util_time_stamp(x))
                df['date_stamp'] = df['date'].apply(
                    lambda x: QA_util_date_stamp(x))
                data_json = json.loads(df.to_json(orient='records'))
                __coll.insert_many(data_json)
def make_batched_timer(self, bucket_seconds, chunk_size=100):
    """
    Creates and returns an object implementing
    :class:`txaio.IBatchedTimer`.

    :param bucket_seconds: the number of seconds in each bucket. That is,
        a value of 5 means that any timeout within a 5 second window will
        be in the same bucket, and get notified at the same time. This is
        only accurate to "milliseconds".

    :param chunk_size: when "doing" the callbacks in a particular bucket,
        this controls how many we do at once before yielding to the
        reactor.
    """

    def get_seconds():
        return self._get_loop().seconds()

    def create_delayed_call(delay, fun, *args, **kwargs):
        return self._get_loop().callLater(delay, fun, *args, **kwargs)

    return _BatchedTimer(
        bucket_seconds * 1000.0, chunk_size,
        seconds_provider=get_seconds,
        delayed_call_creator=create_delayed_call,
    )
def screenshot(self, viewID, filename):
    """screenshot(string, string) -> None

    Save a screenshot for the given view to the given filename.
    The file format is guessed from the extension; the available formats
    differ from platform to platform but should at least include ps, svg
    and pdf, and on linux probably gif, png and jpg as well.
    """
    self._connection._sendStringCmd(
        tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename)
def with_headers(self, headers=None, **params):
    """
    Add headers to the request.

    :param headers: A dict, or a list of key, value pairs
    :param params: A dict of key value pairs
    """
    if isinstance(headers, (tuple, list)):
        headers = dict(headers)

    if params:
        if isinstance(headers, dict):
            headers.update(params)
        elif headers is None:
            headers = params

    # Guard against headers still being None (no headers and no params),
    # which would make dict.update() raise a TypeError.
    if headers:
        self._headers.update(headers)
    return self
def _init_metadata(self):
    """stub"""
    self._first_angle_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'first_angle'),
        'element_label': 'First Angle',
        'instructions': 'set boolean, is this a first angle projection',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_boolean_values': [False],
        'syntax': 'BOOLEAN',
    }
def _run_variantcall_batch_multicore(items, regions, final_file):
    """Run variant calling on a batch of items using multiple cores.
    """
    batch_name = _get_batch_name(items)
    variantcaller = _get_batch_variantcaller(items)
    work_bams = [dd.get_work_bam(d) or dd.get_align_bam(d) for d in items]

    def split_fn(data):
        out = []
        for region in regions:
            region = _region_to_coords(region)
            chrom, start, end = region
            region_str = "_".join(str(x) for x in region)
            out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller,
                                    chrom, "%s-%s.vcf.gz" % (batch_name, region_str))
            out.append((region, work_bams, out_file))
        return final_file, out

    parallel = {"type": "local", "num_jobs": dd.get_num_cores(items[0]),
                "cores_per_job": 1}
    run_parallel = dmulti.runner(parallel, items[0]["config"])
    to_run = copy.deepcopy(items[0])
    to_run["sam_ref"] = dd.get_ref_file(to_run)
    to_run["group_orig"] = items
    parallel_split_combine([[to_run]], split_fn, run_parallel,
                           "variantcall_sample", "concat_variant_files",
                           "vrn_file", ["region", "sam_ref", "config"])
    return final_file
def update_custom_service_account(self, account, nickname, password):
    """
    Update a customer service account.

    :param account: username of the customer service account
    :param nickname: nickname of the customer service account
    :param password: password of the customer service account
    :return: the returned JSON data
    """
    return self.post(
        url="https://api.weixin.qq.com/customservice/kfaccount/update",
        data={
            "kf_account": account,
            "nickname": nickname,
            "password": password
        }
    )
def _writeFile(cls, filePath, content, encoding=None):
    """Safe file writing. Most common mistakes are checked against and
    reported before write operation. After that, if anything unexpected
    happens, user won't be left without data or with corrupted one as
    this method writes to a temporary file and then simply renames it
    (which should be atomic operation according to POSIX but who knows
    how Ext4 really works. @see: http://lwn.net/Articles/322823/)."""

    filePath = os.path.realpath(filePath)
    log.debug(_("Real file path to write: %s" % filePath))

    if encoding is None:
        encoding = File.DEFAULT_ENCODING

    try:
        encodedContent = ''.join(content).encode(encoding)
    except LookupError as msg:
        raise SubFileError(_("Unknown encoding name: '%s'.") % encoding)
    except UnicodeEncodeError:
        raise SubFileError(
            _("There are some characters in '%(file)s' that cannot be encoded to '%(enc)s'.")
            % {"file": filePath, "enc": encoding})

    tmpFilePath = "%s.tmp" % filePath
    bakFilePath = "%s.bak" % filePath

    with open(tmpFilePath, 'wb') as f:
        f.write(encodedContent)
        # ensure that all data is on disk.
        # for performance reasons, we skip os.fsync(f.fileno())
        f.flush()

    try:
        os.rename(filePath, bakFilePath)
    except FileNotFoundError:
        # there's nothing to move when filePath doesn't exist
        # note the Python bug: http://bugs.python.org/issue16074
        pass

    os.rename(tmpFilePath, filePath)

    try:
        os.unlink(bakFilePath)
    except FileNotFoundError:
        pass
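The same write-to-temp-then-rename idea in a minimal standalone form; unlike the method above, this sketch uses os.replace (atomic overwrite on both POSIX and Windows) and does call fsync:

import os

def atomic_write(path, data, encoding='utf-8'):
    tmp = path + '.tmp'
    with open(tmp, 'wb') as f:
        f.write(data.encode(encoding))
        f.flush()
        os.fsync(f.fileno())  # force data to disk before the rename
    os.replace(tmp, path)     # atomic overwrite

atomic_write('example.txt', 'hello\n')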
def get_current_user():
    """Get current user object from middleware"""
    thread_local = AutomatedLoggingMiddleware.thread_local
    if hasattr(thread_local, 'current_user'):
        user = thread_local.current_user
        if isinstance(user, AnonymousUser):
            user = None
    else:
        user = None
    return user
def group_members_add(self, device_group_id, body, **kwargs):  # noqa: E501
    """Add a device to a group  # noqa: E501

    Add one device to a group  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.group_members_add(device_group_id, body, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str device_group_id: The ID of the group (required)
    :param DeviceGroupManipulation body: Body of the request (required)
    :return: DevicePage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.group_members_add_with_http_info(device_group_id, body, **kwargs)  # noqa: E501
    else:
        (data) = self.group_members_add_with_http_info(device_group_id, body, **kwargs)  # noqa: E501
        return data
def sendline(self, s=''):
    '''Wraps send(), sending string ``s`` to child process, with
    os.linesep automatically appended. Returns number of bytes written.
    '''
    n = self.send(s)
    return n + self.send(self.linesep)
def input(self, file_data, **kwargs):
    """
    Required by ply.yacc for this to quack (duck typing) like a ply lexer.

    :param str file_data: Contents of the file to lex.
    """
    self.lex = lex.lex(module=self, **kwargs)
    self.tokens_queue = []
    self.cur_indent = 0
    # Hack to avoid tokenization bugs caused by files that do not end in a
    # new line.
    self.lex.input(file_data + '\n')
async def get_chat(self, chat_id: typing.Union[base.Integer, base.String]) -> types.Chat:
    """
    Use this method to get up to date information about the chat
    (current name of the user for one-on-one conversations, current
    username of a user, group or channel, etc.).

    Source: https://core.telegram.org/bots/api#getchat

    :param chat_id: Unique identifier for the target chat or username of
        the target supergroup or channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :return: Returns a Chat object on success
    :rtype: :obj:`types.Chat`
    """
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.GET_CHAT, payload)

    return types.Chat(**result)
def save(self, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.
    """
    arg_dict = {}
    for param in self.values():
        weight = param._reduce()
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be stripped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "This may be due to your Block sharing parameters from other "
                "Blocks, or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html" % (
                    strip_prefix, param.name, strip_prefix))
        arg_dict[param.name[len(strip_prefix):]] = weight
    ndarray.save(filename, arg_dict)
def construct_txt_file(self):
    """Construct the header of the txt file"""
    textlines = ['Prediction of noncovalent interactions for PDB structure %s'
                 % self.mol.pymol_name.upper(), ]
    textlines.append("=" * len(textlines[0]))
    textlines.append('Created on %s using PLIP v%s\n'
                     % (time.strftime("%Y/%m/%d"), __version__))
    textlines.append('If you are using PLIP in your work, please cite:')
    textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.')
    textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n')
    if len(self.excluded) != 0:
        textlines.append('Excluded molecules as ligands: %s\n'
                         % ','.join([lig for lig in self.excluded]))
    if config.DNARECEPTOR:
        textlines.append('DNA/RNA in structure was chosen as the receptor part.\n')
    return textlines
def _default_pad(self, data_type, num_elms):  # @NoSelf
    '''
    The default pad values by CDF data type
    '''
    order = self._convert_option()
    if (data_type == 51 or data_type == 52):
        return str(' ' * num_elms)

    if (data_type == 1) or (data_type == 41):
        pad_value = struct.pack(order + 'b', -127)
        dt_string = 'i1'
    elif data_type == 2:
        pad_value = struct.pack(order + 'h', -32767)
        dt_string = 'i2'
    elif data_type == 4:
        pad_value = struct.pack(order + 'i', -2147483647)
        dt_string = 'i4'
    elif (data_type == 8) or (data_type == 33):
        pad_value = struct.pack(order + 'q', -9223372036854775807)
        dt_string = 'i8'
    elif data_type == 11:
        pad_value = struct.pack(order + 'B', 254)
        dt_string = 'u1'
    elif data_type == 12:
        pad_value = struct.pack(order + 'H', 65534)
        dt_string = 'u2'
    elif data_type == 14:
        pad_value = struct.pack(order + 'I', 4294967294)
        dt_string = 'u4'
    elif (data_type == 21) or (data_type == 44):
        pad_value = struct.pack(order + 'f', -1.0E30)
        dt_string = 'f'
    elif (data_type == 22) or (data_type == 45) or (data_type == 31):
        pad_value = struct.pack(order + 'd', -1.0E30)
        dt_string = 'd'
    else:  # data_type == 32
        pad_value = struct.pack(order + '2d', *[-1.0E30, -1.0E30])
        dt_string = 'c16'

    dt = np.dtype(dt_string)
    # Copy so the returned array is writable; frombuffer views on
    # immutable bytes are read-only.
    ret = np.frombuffer(pad_value, dtype=dt, count=1).copy()
    return ret
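Round-trip check of one branch (data_type == 2, a signed 16-bit pad), with an explicit little-endian order standing in for _convert_option():

import struct
import numpy as np

pad = struct.pack('<h', -32767)
arr = np.frombuffer(pad, dtype='<i2', count=1)
print(arr[0])  # -32767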
def get_qpimage(self, idx):
    """Return background-corrected QPImage of data at index `idx`"""
    if self._bgdata:
        # The user has explicitly chosen different background data
        # using `get_qpimage_raw`.
        qpi = super(SeriesHdf5Qpimage, self).get_qpimage(idx)
    else:
        # We can use the background data stored in the qpimage hdf5 file
        with self._qpseries() as qps:
            qpi = qps.get_qpimage(index=idx).copy()
    # Force meta data
    for key in self.meta_data:
        qpi[key] = self.meta_data[key]
    # set identifier
    qpi["identifier"] = self.get_identifier(idx)
    return qpi
def read_in_chunks(file_object, chunk_size=CHUNK_SIZE):
    """Generator to read a file piece by piece."""
    while True:
        data = file_object.read(chunk_size)
        if not data:
            break
        yield data
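Usage sketch with an in-memory stream; CHUNK_SIZE is defined elsewhere in the module, so an explicit size is passed instead:

import io

stream = io.BytesIO(b'abcdefgh')
print(list(read_in_chunks(stream, chunk_size=3)))  # [b'abc', b'def', b'gh']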
def parse_hh_mm_ss(self):
    """Parses raw time

    :return: Time parsed
    """
    split_count = self.raw.count(":")
    if split_count == 2:  # hh:mm:ss
        return datetime.strptime(str(self.raw).strip(), "%H:%M:%S").time()
    elif split_count == 1:  # mm:ss
        return datetime.strptime(str(self.raw).strip(), "%M:%S").time()

    return datetime.strptime(str(self.raw).strip(), "%S").time()
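The three strptime branches exercised directly:

from datetime import datetime

print(datetime.strptime('01:02:03', '%H:%M:%S').time())  # 01:02:03
print(datetime.strptime('02:03', '%M:%S').time())        # 00:02:03
print(datetime.strptime('03', '%S').time())              # 00:00:03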
def is_cached(self, path, saltenv='base', cachedir=None):
    '''
    Returns the full path to a file if it is cached locally on the minion
    otherwise returns a blank string
    '''
    if path.startswith('salt://'):
        path, senv = salt.utils.url.parse(path)
        if senv:
            saltenv = senv

    escaped = True if salt.utils.url.is_escaped(path) else False

    # also strip escape character '|'
    localsfilesdest = os.path.join(
        self.opts['cachedir'], 'localfiles', path.lstrip('|/'))
    filesdest = os.path.join(
        self.opts['cachedir'], 'files', saltenv, path.lstrip('|/'))
    extrndest = self._extrn_path(path, saltenv, cachedir=cachedir)

    if os.path.exists(filesdest):
        return salt.utils.url.escape(filesdest) if escaped else filesdest
    elif os.path.exists(localsfilesdest):
        return salt.utils.url.escape(localsfilesdest) \
            if escaped \
            else localsfilesdest
    elif os.path.exists(extrndest):
        return extrndest

    return ''
def _detect_available_configs():
    """
    Returns all currently used channels as well as one other currently
    unused channel.

    .. note::

        This method will run into problems if thousands of autodetected
        busses are used at once.
    """
    with channels_lock:
        available_channels = list(channels.keys())

    # find a currently unused channel
    get_extra = lambda: "channel-{}".format(randint(0, 9999))
    extra = get_extra()
    while extra in available_channels:
        extra = get_extra()

    available_channels += [extra]

    return [
        {'interface': 'virtual', 'channel': channel}
        for channel in available_channels
    ]
def properties(self, rel_path=None):
    """
    Return a dictionary with all svn-properties associated with a
    relative path.

    :param rel_path: relative path in the svn repo to query the
                     properties from
    :returns: a dictionary with the property name as key and the content
              as value
    """
    full_url_or_path = self.__url_or_path
    if rel_path is not None:
        full_url_or_path += '/' + rel_path

    result = self.run_command(
        'proplist',
        ['--xml', full_url_or_path],
        do_combine=True)

    # query the property list of this path
    root = xml.etree.ElementTree.fromstring(result)
    target_elem = root.find('target')
    property_names = [p.attrib["name"]
                      for p in target_elem.findall('property')]

    # now query the content of each property
    property_dict = {}

    for property_name in property_names:
        result = self.run_command(
            'propget',
            ['--xml', property_name, full_url_or_path, ],
            do_combine=True)

        root = xml.etree.ElementTree.fromstring(result)
        target_elem = root.find('target')
        property_elem = target_elem.find('property')
        property_dict[property_name] = property_elem.text

    return property_dict
def contents(self):
    """The raw file contents as a string."""
    if not self._contents:
        if self._path:
            # Read file into memory so we don't run out of file descriptors
            f = open(self._path, "rb")
            self._contents = f.read()
            f.close()
    return self._contents
def collect_iptable(self, tablename):
    """ When running the iptables command, it unfortunately auto-loads
    the modules before trying to get output. Some people explicitly
    don't want this, so check if the modules are loaded before running
    the command. If they aren't loaded, there can't possibly be any
    relevant rules in that table """

    modname = "iptable_" + tablename
    if self.check_ext_prog("grep -q %s /proc/modules" % modname):
        cmd = "iptables -t " + tablename + " -nvL"
        self.add_cmd_output(cmd)
def run_script(module_name):
    '''Take script input (from script_input above), run the run() function,
    and render the results in the appropriate template'''
    filename = ''
    file_stream = ''
    if len(request.files) > 0:
        # Get the name of the uploaded file
        f = request.files['file_upload']
        # Make the filename safe, remove unsupported chars
        filename = secure_filename(f.filename)
        file_stream = f.stream

    try:
        form = werkzeug.datastructures.MultiDict(request.form)
        form['HTTP_AUTHORIZATION'] = get_authorization()
        form['filename'] = filename
        form['file_stream'] = file_stream
        result = registered_modules[module_name].WebAPI().run(form)
    except Exception:
        if app.config['LOCAL_DEV'] == True:
            raise  # pass along to be caught by Flask's debugger
        return render_template('error.html',
                               scripts=registered_modules,
                               module_name=module_name,
                               error_message=traceback.format_exc())

    output = result['output']
    if 'output_type' in result:
        output_type = result['output_type']
    else:
        if isinstance(output, basestring):
            output_type = 'simple'
        else:
            output_type = 'table'

    # Branch on the computed output_type; indexing result['output_type']
    # directly would raise KeyError when the key is absent.
    if output_type == 'custom':
        return render_template('result_custom.html',
                               custom_output=Markup(result['output']),
                               scripts=registered_modules,
                               module_name=module_name)
    elif output_type == 'simple':
        return render_template('result.html',
                               output=result['output'],
                               scripts=registered_modules,
                               module_name=module_name)
    elif output_type == 'file':
        return Response(result['output'],
                        mimetype='application/octet-stream',
                        headers={'Content-Disposition':
                                 'attachment;filename=' + result['filename']})
    elif output_type == 'table':
        return render_template('result_table.html',
                               output=result['output'],
                               scripts=registered_modules,
                               module_name=module_name,
                               headers=result['headers'])
def get_slab_stats(self):
    """Retrieve slab stats from memcached."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((self.host, self.port))
    s.send("stats slabs\n")
    try:
        data = ""
        while True:
            data += s.recv(4096)
            if data.endswith('END\r\n'):
                break
        return data
    finally:
        s.close()
def WhatMustIUnderstand(self):
    '''Return a list of (uri, localname) tuples for all elements in the
    header that have mustUnderstand set.
    '''
    return [(E.namespaceURI, E.localName)
            for E in self.header_elements if _find_mu(E) == "1"]
def _filter_namespaces_by_route_whitelist(self):
    """
    Given a parsed API in IR form, filter the user-defined datatypes
    so that they include only the route datatypes and their direct
    dependencies.
    """
    assert self._routes is not None, "Missing route whitelist"
    assert 'route_whitelist' in self._routes
    assert 'datatype_whitelist' in self._routes

    # Get route whitelist in canonical form
    route_whitelist = {}
    for namespace_name, route_reprs in self._routes['route_whitelist'].items():
        new_route_reprs = []
        if route_reprs == ['*']:
            namespace = self.api.namespaces[namespace_name]
            new_route_reprs = [route.name_with_version() for route in namespace.routes]
        else:
            for route_repr in route_reprs:
                route_name, version = parse_route_name_and_version(route_repr)
                if version > 1:
                    new_route_reprs.append('{}:{}'.format(route_name, version))
                else:
                    new_route_reprs.append(route_name)
        route_whitelist[namespace_name] = new_route_reprs

    # Parse the route whitelist and populate any starting data types
    route_data_types = []
    for namespace_name, route_reprs in route_whitelist.items():
        # Error out if user supplied nonexistent namespace
        if namespace_name not in self.api.namespaces:
            raise AssertionError('Namespace %s is not defined!' % namespace_name)
        namespace = self.api.namespaces[namespace_name]

        # Parse namespace doc refs and add them to the starting data types
        if namespace.doc is not None:
            route_data_types.extend(
                parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name))

        # Parse user-specified routes and add them to the starting data types
        # Note that this may add duplicates, but that's okay, as the recursion
        # keeps track of visited data types.
        assert '*' not in route_reprs
        for routes_repr in route_reprs:
            route_name, version = parse_route_name_and_version(routes_repr)
            if route_name not in namespace.routes_by_name or \
                    version not in namespace.routes_by_name[route_name].at_version:
                raise AssertionError('Route %s at version %d is not defined!' %
                                     (route_name, version))

            route = namespace.routes_by_name[route_name].at_version[version]
            route_data_types.extend(namespace.get_route_io_data_types_for_route(route))
            if route.doc is not None:
                route_data_types.extend(
                    parse_data_types_from_doc_ref(self.api, route.doc, namespace_name))

    # Parse the datatype whitelist and populate any starting data types
    for namespace_name, datatype_names in self._routes['datatype_whitelist'].items():
        if namespace_name not in self.api.namespaces:
            raise AssertionError('Namespace %s is not defined!' % namespace_name)

        # Parse namespace doc refs and add them to the starting data types
        namespace = self.api.namespaces[namespace_name]
        if namespace.doc is not None:
            route_data_types.extend(
                parse_data_types_from_doc_ref(self.api, namespace.doc, namespace_name))

        for datatype_name in datatype_names:
            if datatype_name not in self.api.namespaces[namespace_name].data_type_by_name:
                raise AssertionError('Datatype %s is not defined!' % datatype_name)
            data_type = self.api.namespaces[namespace_name].data_type_by_name[datatype_name]
            route_data_types.append(data_type)

    # Recurse on dependencies
    output_types_by_ns, output_routes_by_ns = self._find_dependencies(route_data_types)

    # Update the IR representation. This involves editing the data types and
    # routes for each namespace.
    for namespace in self.api.namespaces.values():
        data_types = list(set(output_types_by_ns[namespace.name]))  # defaults to empty list
        namespace.data_types = data_types
        namespace.data_type_by_name = {d.name: d for d in data_types}

        output_route_reprs = [output_route.name_with_version()
                              for output_route in output_routes_by_ns[namespace.name]]
        if namespace.name in route_whitelist:
            whitelisted_route_reprs = route_whitelist[namespace.name]
            route_reprs = list(set(whitelisted_route_reprs + output_route_reprs))
        else:
            route_reprs = output_route_reprs

        routes = []
        for route_repr in route_reprs:
            route_name, version = parse_route_name_and_version(route_repr)
            route = namespace.routes_by_name[route_name].at_version[version]
            routes.append(route)

        namespace.routes = []
        namespace.route_by_name = {}
        namespace.routes_by_name = {}
        for route in routes:
            namespace.add_route(route)
Given a parsed API in IR form, filter the user-defined datatypes so that they include only the route datatypes and their direct dependencies.
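The filter asserts on a `_routes` dictionary with `route_whitelist` and `datatype_whitelist` keys; a hypothetical value matching that shape (namespace, route, and datatype names are made up for illustration):

routes = {
    'route_whitelist': {
        'files': ['upload', 'download:2'],  # 'name:version' selects a version > 1
        'users': ['*'],                     # '*' expands to every route in the namespace
    },
    'datatype_whitelist': {
        'files': ['FileMetadata'],
    },
}

Note that '*' is expanded to concrete route names up front, which is why the later `assert '*' not in route_reprs` holds.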
def has_perf_results(self):
    '''
    Determine whether any submission for this assignment has performance data available.
    '''
    num_results = SubmissionTestResult.objects.filter(perf_data__isnull=False).filter(submission_file__submissions__assignment=self).count()
    return num_results != 0
Determine whether any submission for this assignment has performance data available.
def display_dialog(self, *args, **kwargs): """Display form and success message when set""" form = kwargs.pop('form_instance', None) success_message = kwargs.pop('success_message', None) if not form: form = self.get_form_class()(initial=kwargs, instance=self.object) if not hasattr(form, "helper"): form.helper = FormHelper() form.helper.form_tag = False return { 'title': self.title.format( model_name=self.get_model_config().model_name, object=str(self.object) if self.object else '', ), 'content': self.render_to_string(self.template, { 'form': form, 'success_message': success_message, }), 'submit_label': self.submit_label, 'success': bool(success_message), }
Display form and success message when set
def set_row_stretch(self, row=0, stretch=10): """ Sets the row stretch. Larger numbers mean it will expand more to fill space. """ self._layout.setRowStretch(row, stretch) return self
Sets the row stretch. Larger numbers mean it will expand more to fill space.
def genHidden(self, nHidden=10, vTot=0.5, vCommon=0.1):
    """ Generate trait components driven by hidden confounders:
    a common effect Yc (variance vCommon) and an independent,
    trait-specific effect Yi (variance vTot - vCommon). """
    vSpecific = vTot - vCommon
    # generate hidden confounders
    X = self.genWeights(self.N, nHidden)
    # common effect
    H = self.genWeights(nHidden, self.P)
    Bc = SP.dot(H, self.genTraitEffect())
    Yc = SP.dot(X, Bc)
    Yc *= SP.sqrt(vCommon / Yc.var(0).mean())
    # independent effect
    Bi = SP.randn(nHidden, self.P)
    Yi = SP.dot(X, Bi)
    Yi *= SP.sqrt(vSpecific / Yi.var(0).mean())
    return Yc, Yi
Generate trait components driven by hidden confounders: a common effect Yc (variance vCommon) and an independent, trait-specific effect Yi (variance vTot - vCommon).
def get_likelihood(self, uni_matrix): """Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix """ uni_dim = uni_matrix.shape[1] num_edge = len(self.edges) values = np.zeros([1, num_edge]) new_uni_matrix = np.empty([uni_dim, uni_dim]) for i in range(num_edge): edge = self.edges[i] value, left_u, right_u = edge.get_likelihood(uni_matrix) new_uni_matrix[edge.L, edge.R] = left_u new_uni_matrix[edge.R, edge.L] = right_u values[0, i] = np.log(value) return np.sum(values), new_uni_matrix
Compute likelihood of the tree given an U matrix. Args: uni_matrix(numpy.array): univariate matrix to evaluate likelihood on. Returns: tuple[float, numpy.array]: likelihood of the current tree, next level conditional univariate matrix
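A hedged usage sketch: `tree` is assumed to be an already-fitted vine tree whose edges implement `get_likelihood`; the matrix values are illustrative only.

import numpy as np

uni_matrix = np.array([[0.1, 0.4, 0.7]])       # one row of pseudo-observations
log_lik, next_uni = tree.get_likelihood(uni_matrix)
print(log_lik)          # scalar: sum of per-edge log densities
print(next_uni.shape)   # (3, 3): conditional inputs for the next tree level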
def render(n_frames=1, axis=np.array([0.,0.,1.]), clf=True, **kwargs): """Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer. """ v = SceneViewer(Visualizer3D._scene, size=Visualizer3D._init_size, animate=(n_frames > 1), animate_axis=axis, max_frames=n_frames, **kwargs) if clf: Visualizer3D.clf() return v.saved_frames
Render frames from the viewer. Parameters ---------- n_frames : int Number of frames to render. If more than one, the scene will animate. axis : (3,) float or None If present, the animation will rotate about the given axis in world coordinates. Otherwise, the animation will rotate in azimuth. clf : bool If true, the Visualizer is cleared after rendering the figure. kwargs : dict Other keyword arguments for the SceneViewer instance. Returns ------- list of perception.ColorImage A list of ColorImages rendered from the viewer.
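A usage sketch, assuming a scene has already been populated on `Visualizer3D` elsewhere in the API:

import numpy as np

frames = render(n_frames=30, axis=np.array([0.0, 0.0, 1.0]))  # animate about world z
still = render(n_frames=1, clf=False)                         # one frame, keep the scene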
def parse_config(path):
    """Parse the config file if one was found, returning its [default]
    section as a dict; fall back to an empty dict otherwise."""
    config = configparser.ConfigParser()
    if path:
        # if user has config with user creds in it, this will grab it
        config.read(path)
    try:
        return {k: v for k, v in config["default"].items()}
    except KeyError:
        return {}
Parse the config file if one was found, returning its [default] section as a dict; fall back to an empty dict otherwise.
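A usage sketch with a hypothetical config file; only a literal [default] section is read (configparser's special DEFAULT section is uppercase and would not match the config["default"] lookup):

# contents of a hypothetical /home/user/.myapp.cfg:
#   [default]
#   username = alice
#   token = abc123
creds = parse_config('/home/user/.myapp.cfg')
print(creds.get('username'))  # 'alice', or None if the section is missing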
def user_remove(self, domain, userid): """ Remove a user :param AuthDomain domain: The authentication domain for the user. :param userid: The user ID to remove :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist. :return: :class:`~.HttpResult` """ path = self._get_management_path(domain, userid) return self.http_request(path=path, method='DELETE')
Remove a user :param AuthDomain domain: The authentication domain for the user. :param userid: The user ID to remove :raise: :exc:`couchbase.exceptions.HTTPError` if the user does not exist. :return: :class:`~.HttpResult`
def add_gate_option_group(parser): """Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance. """ gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance.
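A usage sketch with argparse; the IFO name and times below are illustrative only:

import argparse

parser = argparse.ArgumentParser()
add_gate_option_group(parser)
opts = parser.parse_args(['--gate', 'H1:1126259462.4:1:0.5',
                          '--gate-overwhitened'])
print(opts.gate)               # ['H1:1126259462.4:1:0.5']
print(opts.gate_overwhitened)  # True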
def _is_in_try_again(self, x, y): """Checks if the game is to be restarted.""" if self.won == 1: # Checks if in try button on won screen. x1, y1, x2, y2 = self._won_try_again return x1 <= x < x2 and y1 <= y < y2 elif self.lost: # Checks if in try button on lost screen. x1, y1, x2, y2 = self._lost_try_again return x1 <= x < x2 and y1 <= y < y2 # Otherwise just no. return False
Checks if the game is to be restarted.
def _ReadString(
    self, file_object, file_offset, data_type_map, description):
  """Reads a string.

  Args:
    file_object (FileIO): file-like object.
    file_offset (int): offset of the data relative from the start of
        the file-like object.
    data_type_map (dtfabric.DataTypeMap): data type map of the string.
    description (str): description of the string.

  Returns:
    object: structure values object.

  Raises:
    FileFormatError: if the string cannot be read.
    ValueError: if file-like object or data type map are invalid.
  """
  # pylint: disable=protected-access
  element_data_size = (
      data_type_map._element_data_type_definition.GetByteSize())
  elements_terminator = (
      data_type_map._data_type_definition.elements_terminator)

  byte_stream = []

  element_data = file_object.read(element_data_size)
  byte_stream.append(element_data)
  while element_data and element_data != elements_terminator:
    element_data = file_object.read(element_data_size)
    byte_stream.append(element_data)

  byte_stream = b''.join(byte_stream)

  return self._ReadStructureFromByteStream(
      byte_stream, file_offset, data_type_map, description)
Reads a string. Args: file_object (FileIO): file-like object. file_offset (int): offset of the data relative from the start of the file-like object. data_type_map (dtfabric.DataTypeMap): data type map of the string. description (str): description of the string. Returns: object: structure values object. Raises: FileFormatError: if the string cannot be read. ValueError: if file-like object or data type map are invalid.
def routingAreaUpdateAccept(PTmsiSignature_presence=0,
                            MobileId_presence=0, MobileId_presence1=0,
                            ReceiveNpduNumbersList_presence=0,
                            GprsTimer_presence=0, GmmCause_presence=0):
    """ROUTING AREA UPDATE ACCEPT Section 9.4.15"""
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x9)  # 00001001
    c = ForceToStandbyAndUpdateResult()
    e = GprsTimer()
    f = RoutingAreaIdentification()
    packet = a / b / c / e / f
    if PTmsiSignature_presence == 1:
        g = PTmsiSignature(ieiPTS=0x19)
        packet = packet / g
    if MobileId_presence == 1:
        h = MobileIdHdr(ieiMI=0x18, eightBitMI=0x0)
        packet = packet / h
    if MobileId_presence1 == 1:
        i = MobileIdHdr(ieiMI=0x23, eightBitMI=0x0)
        packet = packet / i
    if ReceiveNpduNumbersList_presence == 1:
        j = ReceiveNpduNumbersList(ieiRNNL=0x26)
        packet = packet / j
    if GprsTimer_presence == 1:
        k = GprsTimer(ieiGT=0x17)
        packet = packet / k
    if GmmCause_presence == 1:
        l = GmmCause(ieiGC=0x25)
        packet = packet / l
    return packet
ROUTING AREA UPDATE ACCEPT Section 9.4.15
def set_chat_photo(self, *args, **kwargs): """See :func:`set_chat_photo`""" return set_chat_photo(*args, **self._merge_overrides(**kwargs)).run()
See :func:`set_chat_photo`
async def publish(self, message): """Push a new message to the client. The data will be available as a JSON object with the key ``data``. """ try: self.write_message(dict(data=message)) except WebSocketClosedError: self._close()
Push a new message to the client. The data will be available as a JSON object with the key ``data``.
def get(self, request, bot_id, id, format=None): """ Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated """ return super(HookDetail, self).get(request, bot_id, id, format)
Get hook by id --- serializer: HookSerializer responseMessages: - code: 401 message: Not authenticated
def taskotron_task_particular_or_changed_outcome(config, message,
                                                 outcome='FAILED,NEEDS_INSPECTION'):
    """ Taskotron task any particular or changed outcome(s)

    With this rule, you can limit messages to only those task results
    with any particular outcome(s) (FAILED and NEEDS_INSPECTION by
    default) or those with changed outcomes.

    This rule is a handy way to cover a common use case: being notified
    when a task either requires your attention or its outcome has changed
    since the last time the task ran for the same item (e.g. a koji
    build).

    You can specify several outcomes by separating them with a comma ',',
    i.e.: ``PASSED,FAILED``.

    The full list of supported outcomes can be found in the libtaskotron
    `documentation
    <https://docs.qadevel.cloud.fedoraproject.org/libtaskotron/latest/resultyaml.html#minimal-version>`_.
    """
    return taskotron_task_outcome(config, message, outcome) or \
        taskotron_changed_outcome(config, message)
Taskotron task any particular or changed outcome(s) With this rule, you can limit messages to only those task results with any particular outcome(s) (FAILED and NEEDS_INSPECTION by default) or those with changed outcomes. This rule is a handy way to cover a common use case: being notified when a task either requires your attention or its outcome has changed since the last time the task ran for the same item (e.g. a koji build). You can specify several outcomes by separating them with a comma ',', i.e.: ``PASSED,FAILED``. The full list of supported outcomes can be found in the libtaskotron `documentation <https://docs.qadevel.cloud.fedoraproject.org/libtaskotron/latest/resultyaml.html#minimal-version>`_.
def _GetDayOfYear(self, year, month, day_of_month): """Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds. """ if month not in range(1, 13): raise ValueError('Month value out of bounds.') days_per_month = self._GetDaysPerMonth(year, month) if day_of_month < 1 or day_of_month > days_per_month: raise ValueError('Day of month value out of bounds.') day_of_year = day_of_month for past_month in range(1, month): day_of_year += self._GetDaysPerMonth(year, past_month) return day_of_year
Retrieves the day of the year for a specific day of a month in a year. Args: year (int): year e.g. 1970. month (int): month, where 1 represents January. day_of_month (int): day of the month, where 1 represents the first day. Returns: int: day of year. Raises: ValueError: if the month or day of month value is out of bounds.
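A self-contained sketch of the same computation, substituting calendar.monthrange for the class's _GetDaysPerMonth helper (an assumption for illustration; the class may handle calendars differently):

import calendar

def day_of_year(year, month, day_of_month):
    # Day of the year = day of the month plus the days of all earlier months.
    if month not in range(1, 13):
        raise ValueError('Month value out of bounds.')
    if not 1 <= day_of_month <= calendar.monthrange(year, month)[1]:
        raise ValueError('Day of month value out of bounds.')
    return day_of_month + sum(
        calendar.monthrange(year, m)[1] for m in range(1, month))

print(day_of_year(2000, 3, 1))  # 61 -- 31 + 29 + 1, since 2000 is a leap year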
def check_compatibility(datasets, reqd_num_features=None):
    """
    Checks whether the given MLDataset instances are compatible,
    i.e. with the same set of subjects, each belonging to the same class in all instances.

    Checks the first dataset in the list against the rest, and returns a boolean array.

    Parameters
    ----------
    datasets : Iterable
        A list of n datasets

    reqd_num_features : int
        The required number of features in each dataset.
        Helpful to ensure test sets are compatible with training set,
        as well as within themselves.

    Returns
    -------
    all_are_compatible : bool
        Boolean flag indicating whether all datasets are compatible or not

    compatibility : list
        List indicating whether first dataset is compatible with the rest individually.
        This could be useful to select a subset of mutually compatible datasets.
        Length : n-1

    dim_mismatch : bool
        Boolean flag indicating mismatch in dimensionality from that specified

    size_descriptor : tuple
        A tuple with values for (num_samples, reqd_num_features)
        - num_samples must be common for all datasets that are evaluated for compatibility
        - reqd_num_features is None (when no check on dimensionality is performed), or
            list of corresponding dimensionalities for each input dataset

    """
    from collections.abc import Iterable
    if not isinstance(datasets, Iterable):
        raise TypeError('Input must be an iterable '
                        'i.e. (list/tuple) of MLDataset/similar instances')

    datasets = list(datasets)  # to make it indexable if coming from a set
    num_datasets = len(datasets)

    check_dimensionality = False
    dim_mismatch = False
    if reqd_num_features is not None:
        if isinstance(reqd_num_features, Iterable):
            if len(reqd_num_features) != num_datasets:
                raise ValueError('Specify dimensionality for exactly {} datasets.'
                                 ' Given for a different number {}'
                                 ''.format(num_datasets, len(reqd_num_features)))
            reqd_num_features = list(map(int, reqd_num_features))
        else:  # same dimensionality for all
            reqd_num_features = [int(reqd_num_features)] * num_datasets

        check_dimensionality = True
    else:
        # to enable iteration
        reqd_num_features = [None, ] * num_datasets

    pivot = datasets[0]
    if not isinstance(pivot, MLDataset):
        pivot = MLDataset(pivot)

    if check_dimensionality and pivot.num_features != reqd_num_features[0]:
        warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.'
                      ''.format(reqd_num_features[0], pivot.num_features))
        dim_mismatch = True

    compatible = list()
    for ds, reqd_dim in zip(datasets[1:], reqd_num_features[1:]):
        if not isinstance(ds, MLDataset):
            ds = MLDataset(ds)

        is_compatible = True
        # the compound boolean check short-circuits; no optimization required
        if pivot.num_samples != ds.num_samples \
                or pivot.keys != ds.keys \
                or pivot.classes != ds.classes:
            is_compatible = False

        if check_dimensionality and reqd_dim != ds.num_features:
            warnings.warn('Dimensionality mismatch! Expected {} whereas current {}.'
                          ''.format(reqd_dim, ds.num_features))
            dim_mismatch = True

        compatible.append(is_compatible)

    return all(compatible), compatible, dim_mismatch, \
           (pivot.num_samples, reqd_num_features)
Checks whether the given MLDataset instances are compatible, i.e. with the same set of subjects, each belonging to the same class in all instances. Checks the first dataset in the list against the rest, and returns a boolean array. Parameters ---------- datasets : Iterable A list of n datasets reqd_num_features : int The required number of features in each dataset. Helpful to ensure test sets are compatible with training set, as well as within themselves. Returns ------- all_are_compatible : bool Boolean flag indicating whether all datasets are compatible or not compatibility : list List indicating whether first dataset is compatible with the rest individually. This could be useful to select a subset of mutually compatible datasets. Length : n-1 dim_mismatch : bool Boolean flag indicating mismatch in dimensionality from that specified size_descriptor : tuple A tuple with values for (num_samples, reqd_num_features) - num_samples must be common for all datasets that are evaluated for compatibility - reqd_num_features is None (when no check on dimensionality is performed), or list of corresponding dimensionalities for each input dataset
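A hedged usage sketch; train, test1 and test2 are assumed to be MLDataset instances loaded elsewhere, and 100 is an illustrative feature count:

all_ok, per_dataset, dim_mismatch, (n_samples, dims) = check_compatibility(
    [train, test1, test2], reqd_num_features=100)
if not all_ok:
    bad = [i + 1 for i, ok in enumerate(per_dataset) if not ok]
    print('Datasets incompatible with the first one:', bad)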
def disable_branching_model(self, project, repository): """ Disable branching model :param project: :param repository: :return: """ url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format( project=project, repository=repository) return self.delete(url)
Disable branching model :param project: :param repository: :return:
def sync_headers(cloud_obj, headers=None, header_patterns=HEADER_PATTERNS):
    """
    Overwrites the given cloud_obj's headers with the ones given as
    ``headers`` and adds additional headers as defined in the HEADERS
    setting depending on the cloud_obj's file name.
    """
    if headers is None:
        headers = {}

    # don't set headers on directories
    content_type = getattr(cloud_obj, "content_type", None)
    if content_type == "application/directory":
        return

    matched_headers = {}
    for pattern, pattern_headers in header_patterns:
        if pattern.match(cloud_obj.name):
            matched_headers.update(pattern_headers.copy())

    # preserve headers already set
    matched_headers.update(cloud_obj.headers)

    # explicitly set headers overwrite matches and already set headers
    matched_headers.update(headers)

    if matched_headers != cloud_obj.headers:
        cloud_obj.headers = matched_headers
        cloud_obj.sync_metadata()
Overwrites the given cloud_obj's headers with the ones given as ``headers`` and adds additional headers as defined in the HEADERS setting depending on the cloud_obj's file name.
def get_value(self, default=None):
    """Get selection from widget.

    Parameters
    ----------
    default : str
        value to fall back on if the widget text cannot be read

    Returns
    -------
    str
        selected item from the combobox
    """
    if default is None:
        default = ''

    try:
        text = self.currentText()
    except ValueError:
        lg.debug('Cannot read the current text. Using default "' +
                 str(default) + '"')
        text = default
        self.set_value(text)

    return text
Get selection from widget. Parameters ---------- default : str value to fall back on if the widget text cannot be read Returns ------- str selected item from the combobox
def update(self, pk, data, **kwargs):
    """
    Updates the object by primary_key:

    .. code-block:: python

        DBSession.sacrud(Users).update(1, {'name': 'Petya'})
        DBSession.sacrud(Users).update('1', {'name': 'Petya'})
        DBSession.sacrud(User2Groups).update({'user_id': 4, 'group_id': 2},
                                             {'group_id': 1})

    JSON support:

    .. code-block:: python

        DBSession.sacrud(Users).update(1, '{"name": "Petya"}')
        DBSession.sacrud(User2Groups).update(
            '{"user_id": 4, "group_id": 2}',  # primary_key
            '{"group_id": 1}'                 # data
        )

    By default it runs ``session.commit()`` or ``transaction.commit()``.
    Pass ``commit=False`` if that is not desired.
    """
    pk = unjson(pk)
    data = unjson(data)
    obj = get_obj(self.session, self.table, pk)
    return self._add(obj, data, **kwargs)
Updates the object by primary_key: .. code-block:: python DBSession.sacrud(Users).update(1, {'name': 'Petya'}) DBSession.sacrud(Users).update('1', {'name': 'Petya'}) DBSession.sacrud(User2Groups).update({'user_id': 4, 'group_id': 2}, {'group_id': 1}) JSON support: .. code-block:: python DBSession.sacrud(Users).update(1, '{"name": "Petya"}') DBSession.sacrud(User2Groups).update( '{"user_id": 4, "group_id": 2}', # primary_key '{"group_id": 1}' # data ) By default it runs ``session.commit()`` or ``transaction.commit()``. Pass ``commit=False`` if that is not desired.
def patched_web3_eth_estimate_gas(self, transaction, block_identifier=None): """ Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311 """ if 'from' not in transaction and is_checksum_address(self.defaultAccount): transaction = assoc(transaction, 'from', self.defaultAccount) if block_identifier is None: params = [transaction] else: params = [transaction, block_identifier] try: result = self.web3.manager.request_blocking( 'eth_estimateGas', params, ) except ValueError as e: if check_value_error_for_parity(e, ParityCallType.ESTIMATE_GAS): result = None else: # else the error is not denoting estimate gas failure and is something else raise e return result
Temporary workaround until next web3.py release (5.X.X) Current master of web3.py has this implementation already: https://github.com/ethereum/web3.py/blob/2a67ea9f0ab40bb80af2b803dce742d6cad5943e/web3/eth.py#L311
def system_supports_plotting():
    """
    Check if an X server is running

    Returns
    -------
    system_supports_plotting : bool
        True when an X server is available or the ALLOW_PLOTTING
        environment variable is set to 'true'; False otherwise.
    """
    try:
        if os.environ['ALLOW_PLOTTING'].lower() == 'true':
            return True
    except KeyError:
        pass
    try:
        p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
        p.communicate()
        return p.returncode == 0
    except Exception:
        return False
Check if an X server is running Returns ------- system_supports_plotting : bool True when an X server is available or the ALLOW_PLOTTING environment variable is set to 'true'; False otherwise.
def render_template(self, template_parameters, template_id): """RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>` """ route_values = {} if template_id is not None: route_values['templateId'] = self._serialize.url('template_id', template_id, 'str') content = self._serialize.body(template_parameters, 'TemplateParameters') response = self._send(http_method='POST', location_id='eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Template', response)
RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>`
def cublasDsyr2(handle, uplo, n, alpha, x, incx, y, incy, A, lda): """ Rank-2 operation on real symmetric matrix. """ status = _libcublas.cublasDsyr2_v2(handle, _CUBLAS_FILL_MODE[uplo], n, ctypes.byref(ctypes.c_double(alpha)), int(x), incx, int(y), incy, int(A), lda) cublasCheckStatus(status)
Rank-2 operation on real symmetric matrix.
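For checking results copied back from the GPU, a NumPy reference of the standard BLAS SYR2 update, A := alpha*(x*y^T + y*x^T) + A, with only the triangle selected by `uplo` guaranteed on exit:

import numpy as np

def syr2_reference(alpha, x, y, A):
    # Dense reference: the CUBLAS routine only touches the uplo triangle,
    # so compare just that triangle against this result.
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return alpha * (x @ y.T + y @ x.T) + A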
def pathencode (path): """Encode a path string with the platform file system encoding.""" if isinstance(path, unicode) and not os.path.supports_unicode_filenames: path = path.encode(FSCODING, "replace") return path
Encode a path string with the platform file system encoding.
def get_identity(self, subject_id, entities=None):
    """ Get all the identity information that has been received and
    is still valid about the subject.

    :param subject_id: The identifier of the subject
    :param entities: The identifiers of the entities whose assertions are
        interesting. If the list is empty all entities are interesting.
    :return: A 2-tuple consisting of the identity information (a
        dictionary of attributes and values) and the list of entities
        whose information has timed out.
    """
    if not entities:
        entities = self.entities(subject_id)
        if not entities:
            return {}, []

    res = {}
    oldees = []
    for (entity_id, item) in self._cache.get_multi(entities,
                                                   subject_id + '_').items():
        try:
            info = self.get_info(item)
        except ToOld:
            oldees.append(entity_id)
            continue

        for key, vals in info["ava"].items():
            try:
                tmp = set(res[key]).union(set(vals))
                res[key] = list(tmp)
            except KeyError:
                res[key] = vals

    return res, oldees
Get all the identity information that has been received and is still valid about the subject. :param subject_id: The identifier of the subject :param entities: The identifiers of the entities whose assertions are interesting. If the list is empty all entities are interesting. :return: A 2-tuple consisting of the identity information (a dictionary of attributes and values) and the list of entities whose information has timed out.
def reload_all_manifests(self): """ Reloads all loaded manifests, and loads any new manifests """ self._logger.debug("Reloading all manifests.") self._manifests = [] self.load_manifests() self._logger.debug("All manifests reloaded.")
Reloads all loaded manifests, and loads any new manifests
def overlay_gateway_site_bfd_params_bfd_shutdown(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') site = ET.SubElement(overlay_gateway, "site") name_key = ET.SubElement(site, "name") name_key.text = kwargs.pop('name') bfd = ET.SubElement(site, "bfd") params = ET.SubElement(bfd, "params") bfd_shutdown = ET.SubElement(params, "bfd-shutdown") callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def dumpBlock(self, block_name):
    """
    API to list all information related to the block_name

    :param block_name: Name of the block to be dumped (Required)
    :type block_name: str
    """
    try:
        return self.dbsBlock.dumpBlock(block_name)
    except HTTPError as he:
        raise he
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception,
                            de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', ex.message,
                            self.logger.exception, sError)
API to list all information related to the block_name :param block_name: Name of the block to be dumped (Required) :type block_name: str
def locationUpdatingAccept(MobileId_presence=0, FollowOnProceed_presence=0,
                           CtsPermission_presence=0):
    """LOCATION UPDATING ACCEPT Section 9.2.13"""
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x02)  # 00000010
    c = LocalAreaId()
    packet = a / b / c
    if MobileId_presence == 1:
        d = MobileIdHdr(ieiMI=0x17, eightBitMI=0x0)
        packet = packet / d
    if FollowOnProceed_presence == 1:
        e = FollowOnProceed(ieiFOP=0xA1)
        packet = packet / e
    if CtsPermission_presence == 1:
        f = CtsPermissionHdr(ieiCP=0xA2, eightBitCP=0x0)
        packet = packet / f
    return packet
LOCATION UPDATING ACCEPT Section 9.2.13
def contains(self, key): '''Returns whether the object named by `key` exists. LoggingDatastore logs the access. ''' self.logger.info('%s: contains %s' % (self, key)) return super(LoggingDatastore, self).contains(key)
Returns whether the object named by `key` exists. LoggingDatastore logs the access.
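A usage sketch, assuming the surrounding package's DictDatastore and Key types (the names follow the datastore convention but are assumptions here):

import logging

logging.basicConfig(level=logging.INFO)
store = LoggingDatastore(DictDatastore())
key = Key('/users/alice')
store.put(key, {'name': 'alice'})
print(store.contains(key))  # True, after an info log like '<...>: contains /users/alice'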