code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def getsuffix(subject):
    """Return the suffix (extension) of a filename.

    Returns None when the name has no suffix; returns an empty string
    when the filename ends with a period. Both '/' and '\\' are treated
    as path separators.
    """
    dot = subject.rfind('.')
    # Normalize Windows separators so a single rfind locates the last one.
    last_sep = subject.replace('\\', '/').rfind('/')
    if dot > last_sep:
        return subject[dot + 1:]
    return None
Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filename ends with a period.
Below is the the instruction that describes the task: ### Input: Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filenam ends with a period. ### Response: def getsuffix(subject): """ Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filenam ends with a period. """ index = subject.rfind('.') if index > subject.replace('\\', '/').rfind('/'): return subject[index+1:] return None
def add_variable(self, node):
    """Add a variable node to this node.

    :sig: (VariableNode) -> None
    :param node: Variable node to add.
    """
    if node.name in self.variable_names:
        # Already registered under this name: ignore the duplicate.
        return
    self.variables.append(node)
    self.variable_names.add(node.name)
    node.parent = self
Add a variable node to this node. :sig: (VariableNode) -> None :param node: Variable node to add.
Below is the the instruction that describes the task: ### Input: Add a variable node to this node. :sig: (VariableNode) -> None :param node: Variable node to add. ### Response: def add_variable(self, node): """Add a variable node to this node. :sig: (VariableNode) -> None :param node: Variable node to add. """ if node.name not in self.variable_names: self.variables.append(node) self.variable_names.add(node.name) node.parent = self
def get_formats(
    self, token: dict = None, format_code: str = None, prot: str = "https"
) -> dict:
    """Get formats.

    :param str token: API auth token (kept for signature compatibility;
        not read in this body — presumably consumed elsewhere, TODO confirm)
    :param str format_code: code of a specific format
    :param str prot: https [DEFAULT] or http (use it only for dev and
        tracking needs).
    """
    # Target one specific format when a code is given, else the collection.
    subresource = "/{}".format(format_code) if isinstance(format_code, str) else ""
    request_url = "{}://v1.{}.isogeo.com/formats{}".format(
        prot, self.api_url, subresource
    )
    response = self.get(
        request_url, headers=self.header, proxies=self.proxies, verify=self.ssl
    )
    # Raise/log on bad HTTP responses before decoding.
    checker.check_api_response(response)
    return response.json()
Get formats. :param str token: API auth token :param str format_code: code of a specific format :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs).
Below is the the instruction that describes the task: ### Input: Get formats. :param str token: API auth token :param str format_code: code of a specific format :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). ### Response: def get_formats( self, token: dict = None, format_code: str = None, prot: str = "https" ) -> dict: """Get formats. :param str token: API auth token :param str format_code: code of a specific format :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs). """ # if specific format if isinstance(format_code, str): specific_format = "/{}".format(format_code) else: specific_format = "" # search request req_url = "{}://v1.{}.isogeo.com/formats{}".format( prot, self.api_url, specific_format ) req = self.get( req_url, headers=self.header, proxies=self.proxies, verify=self.ssl ) # checking response checker.check_api_response(req) # end of method return req.json()
def variants(institute_id, case_name):
    """Display a list of SNV variants.

    Renders the filtered variant list for a case: applies the Clinical
    Filter shortcut when requested, handles gene-panel symbol-file uploads,
    validates submitted HGNC ids/symbols, and supports CSV export.

    :param institute_id: id of the institute the case belongs to
    :param case_name: display name of the case
    :return: template context dict, or a CSV ``Response`` when exporting
    """
    page = int(request.form.get('page', 1))
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    # NOTE(review): this local is never read below.
    variant_type = request.args.get('variant_type', 'clinical')
    # Update filter settings if Clinical Filter was requested
    default_panels = []
    for panel in case_obj['panels']:
        if panel.get('is_default'):
            default_panels.append(panel['panel_name'])
    # NOTE(review): return value discarded — this call has no effect.
    request.form.get('gene_panels')
    if bool(request.form.get('clinical_filter')):
        clinical_filter = MultiDict({
            'variant_type': 'clinical',
            'region_annotations': ['exonic','splicing'],
            'functional_annotations': SEVERE_SO_TERMS,
            'clinsig': [4,5],
            'clinsig_confident_always_returned': True,
            'gnomad_frequency': str(institute_obj['frequency_cutoff']),
            # NOTE(review): duplicate key — 'variant_type' already appears
            # above with the same value; this entry is redundant.
            'variant_type': 'clinical',
            'gene_panels': default_panels
        })
    if(request.method == "POST"):
        if bool(request.form.get('clinical_filter')):
            form = FiltersForm(clinical_filter)
            form.csrf_token = request.args.get('csrf_token')
        else:
            form = FiltersForm(request.form)
    else:
        form = FiltersForm(request.args)
    # populate available panel choices
    available_panels = case_obj.get('panels', []) + [
        {'panel_name': 'hpo', 'display_name': 'HPO'}]
    panel_choices = [(panel['panel_name'], panel['display_name'])
                     for panel in available_panels]
    form.gene_panels.choices = panel_choices
    # upload gene panel if symbol file exists
    if (request.files):
        file = request.files[form.symbol_file.name]
    if request.files and file and file.filename != '':
        log.debug("Upload file request files: {0}".format(request.files.to_dict()))
        try:
            # assumes a UTF-8 encoded text file — anything else is rejected
            stream = io.StringIO(file.stream.read().decode('utf-8'), newline=None)
        except UnicodeDecodeError as error:
            flash("Only text files are supported!", 'warning')
            return redirect(request.referrer)
        hgnc_symbols_set = set(form.hgnc_symbols.data)
        log.debug("Symbols prior to upload: {0}".format(hgnc_symbols_set))
        new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream)
        hgnc_symbols_set.update(new_hgnc_symbols)
        form.hgnc_symbols.data = hgnc_symbols_set
        # reset gene panels
        form.gene_panels.data = ''
    # update status of case if visited for the first time
    if case_obj['status'] == 'inactive' and not current_user.is_admin:
        flash('You just activated this case!', 'info')
        user_obj = store.user(current_user.email)
        case_link = url_for('cases.case', institute_id=institute_obj['_id'],
                            case_name=case_obj['display_name'])
        store.update_status(institute_obj, case_obj, user_obj, 'active', case_link)
    # check if supplied gene symbols exist
    hgnc_symbols = []
    non_clinical_symbols = []
    not_found_symbols = []
    not_found_ids = []
    if (form.hgnc_symbols.data) and len(form.hgnc_symbols.data) > 0:
        is_clinical = form.data.get('variant_type', 'clinical') == 'clinical'
        clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None
        for hgnc_symbol in form.hgnc_symbols.data:
            # numeric entries are treated as HGNC ids, anything else as symbols
            if hgnc_symbol.isdigit():
                hgnc_gene = store.hgnc_gene(int(hgnc_symbol))
                if hgnc_gene is None:
                    not_found_ids.append(hgnc_symbol)
                else:
                    hgnc_symbols.append(hgnc_gene['hgnc_symbol'])
            elif store.hgnc_genes(hgnc_symbol).count() == 0:
                not_found_symbols.append(hgnc_symbol)
            elif is_clinical and (hgnc_symbol not in clinical_symbols):
                non_clinical_symbols.append(hgnc_symbol)
            else:
                hgnc_symbols.append(hgnc_symbol)
    if (not_found_ids):
        flash("HGNC id not found: {}".format(", ".join(not_found_ids)), 'warning')
    if (not_found_symbols):
        flash("HGNC symbol not found: {}".format(", ".join(not_found_symbols)), 'warning')
    if (non_clinical_symbols):
        flash("Gene not included in clinical list: {}".format(", ".join(non_clinical_symbols)), 'warning')
    form.hgnc_symbols.data = hgnc_symbols
    # handle HPO gene list separately
    if form.data['gene_panels'] == ['hpo']:
        hpo_symbols = list(set(term_obj['hgnc_symbol'] for term_obj in
                               case_obj['dynamic_gene_list']))
        form.hgnc_symbols.data = hpo_symbols
    variants_query = store.variants(case_obj['_id'], query=form.data)
    data = {}
    if request.form.get('export'):
        document_header = controllers.variants_export_header(case_obj)
        export_lines = []
        if form.data['chrom'] == 'MT':
            # Return all MT variants
            export_lines = controllers.variant_export_lines(store, case_obj, variants_query)
        else:
            # Return max 500 variants
            export_lines = controllers.variant_export_lines(store, case_obj, variants_query.limit(500))

        def generate(header, lines):
            # stream the CSV one row at a time
            yield header + '\n'
            for line in lines:
                yield line + '\n'

        headers = Headers()
        headers.add('Content-Disposition','attachment', filename=str(case_obj['display_name'])+'-filtered_variants.csv')
        # return a csv with the exported variants
        return Response(generate(",".join(document_header), export_lines),
                        mimetype='text/csv', headers=headers)
    data = controllers.variants(store, institute_obj, case_obj, variants_query, page)
    return dict(institute=institute_obj, case=case_obj, form=form,
                severe_so_terms=SEVERE_SO_TERMS, page=page, **data)
Display a list of SNV variants.
Below is the the instruction that describes the task: ### Input: Display a list of SNV variants. ### Response: def variants(institute_id, case_name): """Display a list of SNV variants.""" page = int(request.form.get('page', 1)) institute_obj, case_obj = institute_and_case(store, institute_id, case_name) variant_type = request.args.get('variant_type', 'clinical') # Update filter settings if Clinical Filter was requested default_panels = [] for panel in case_obj['panels']: if panel.get('is_default'): default_panels.append(panel['panel_name']) request.form.get('gene_panels') if bool(request.form.get('clinical_filter')): clinical_filter = MultiDict({ 'variant_type': 'clinical', 'region_annotations': ['exonic','splicing'], 'functional_annotations': SEVERE_SO_TERMS, 'clinsig': [4,5], 'clinsig_confident_always_returned': True, 'gnomad_frequency': str(institute_obj['frequency_cutoff']), 'variant_type': 'clinical', 'gene_panels': default_panels }) if(request.method == "POST"): if bool(request.form.get('clinical_filter')): form = FiltersForm(clinical_filter) form.csrf_token = request.args.get('csrf_token') else: form = FiltersForm(request.form) else: form = FiltersForm(request.args) # populate available panel choices available_panels = case_obj.get('panels', []) + [ {'panel_name': 'hpo', 'display_name': 'HPO'}] panel_choices = [(panel['panel_name'], panel['display_name']) for panel in available_panels] form.gene_panels.choices = panel_choices # upload gene panel if symbol file exists if (request.files): file = request.files[form.symbol_file.name] if request.files and file and file.filename != '': log.debug("Upload file request files: {0}".format(request.files.to_dict())) try: stream = io.StringIO(file.stream.read().decode('utf-8'), newline=None) except UnicodeDecodeError as error: flash("Only text files are supported!", 'warning') return redirect(request.referrer) hgnc_symbols_set = set(form.hgnc_symbols.data) log.debug("Symbols prior to upload: 
{0}".format(hgnc_symbols_set)) new_hgnc_symbols = controllers.upload_panel(store, institute_id, case_name, stream) hgnc_symbols_set.update(new_hgnc_symbols) form.hgnc_symbols.data = hgnc_symbols_set # reset gene panels form.gene_panels.data = '' # update status of case if vistited for the first time if case_obj['status'] == 'inactive' and not current_user.is_admin: flash('You just activated this case!', 'info') user_obj = store.user(current_user.email) case_link = url_for('cases.case', institute_id=institute_obj['_id'], case_name=case_obj['display_name']) store.update_status(institute_obj, case_obj, user_obj, 'active', case_link) # check if supplied gene symbols exist hgnc_symbols = [] non_clinical_symbols = [] not_found_symbols = [] not_found_ids = [] if (form.hgnc_symbols.data) and len(form.hgnc_symbols.data) > 0: is_clinical = form.data.get('variant_type', 'clinical') == 'clinical' clinical_symbols = store.clinical_symbols(case_obj) if is_clinical else None for hgnc_symbol in form.hgnc_symbols.data: if hgnc_symbol.isdigit(): hgnc_gene = store.hgnc_gene(int(hgnc_symbol)) if hgnc_gene is None: not_found_ids.append(hgnc_symbol) else: hgnc_symbols.append(hgnc_gene['hgnc_symbol']) elif store.hgnc_genes(hgnc_symbol).count() == 0: not_found_symbols.append(hgnc_symbol) elif is_clinical and (hgnc_symbol not in clinical_symbols): non_clinical_symbols.append(hgnc_symbol) else: hgnc_symbols.append(hgnc_symbol) if (not_found_ids): flash("HGNC id not found: {}".format(", ".join(not_found_ids)), 'warning') if (not_found_symbols): flash("HGNC symbol not found: {}".format(", ".join(not_found_symbols)), 'warning') if (non_clinical_symbols): flash("Gene not included in clinical list: {}".format(", ".join(non_clinical_symbols)), 'warning') form.hgnc_symbols.data = hgnc_symbols # handle HPO gene list separately if form.data['gene_panels'] == ['hpo']: hpo_symbols = list(set(term_obj['hgnc_symbol'] for term_obj in case_obj['dynamic_gene_list'])) form.hgnc_symbols.data = hpo_symbols 
variants_query = store.variants(case_obj['_id'], query=form.data) data = {} if request.form.get('export'): document_header = controllers.variants_export_header(case_obj) export_lines = [] if form.data['chrom'] == 'MT': # Return all MT variants export_lines = controllers.variant_export_lines(store, case_obj, variants_query) else: # Return max 500 variants export_lines = controllers.variant_export_lines(store, case_obj, variants_query.limit(500)) def generate(header, lines): yield header + '\n' for line in lines: yield line + '\n' headers = Headers() headers.add('Content-Disposition','attachment', filename=str(case_obj['display_name'])+'-filtered_variants.csv') # return a csv with the exported variants return Response(generate(",".join(document_header), export_lines), mimetype='text/csv', headers=headers) data = controllers.variants(store, institute_obj, case_obj, variants_query, page) return dict(institute=institute_obj, case=case_obj, form=form, severe_so_terms=SEVERE_SO_TERMS, page=page, **data)
def _expand_authorized_keys_path(path, user, home): ''' Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) ''' converted_path = '' had_escape = False for char in path: if had_escape: had_escape = False if char == '%': converted_path += '%' elif char == 'u': converted_path += user elif char == 'h': converted_path += home else: error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char) raise CommandExecutionError(error) continue elif char == '%': had_escape = True else: converted_path += char if had_escape: error = "AuthorizedKeysFile path: Last character can't be escape character" raise CommandExecutionError(error) return converted_path
Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5)
Below is the the instruction that describes the task: ### Input: Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) ### Response: def _expand_authorized_keys_path(path, user, home): ''' Expand the AuthorizedKeysFile expression. Defined in man sshd_config(5) ''' converted_path = '' had_escape = False for char in path: if had_escape: had_escape = False if char == '%': converted_path += '%' elif char == 'u': converted_path += user elif char == 'h': converted_path += home else: error = 'AuthorizedKeysFile path: unknown token character "%{0}"'.format(char) raise CommandExecutionError(error) continue elif char == '%': had_escape = True else: converted_path += char if had_escape: error = "AuthorizedKeysFile path: Last character can't be escape character" raise CommandExecutionError(error) return converted_path
def spliced_offset(self, position):
    """
    Convert from an absolute chromosomal position to the offset into
    this transcript's spliced mRNA.

    Position must be inside some exon (otherwise raise exception).
    """
    # Performance-sensitive path: plain assert instead of a typecheck helper.
    assert type(position) == int, \
        "Position argument must be an integer, got %s : %s" % (
            position, type(position))
    if position < self.start or position > self.end:
        raise ValueError(
            "Invalid position: %d (must be between %d and %d)" % (
                position, self.start, self.end))
    # Offset from the beginning of the unspliced transcript (introns included).
    unspliced_offset = self.offset(position)
    # Unspliced offsets always increase along the strand (even on the negative
    # strand, where absolute positions decrease), so walk the exons in strand
    # order and accumulate exonic length until the offset falls inside one.
    spliced_so_far = 0
    for exon in self.exons:
        exon_begin, exon_stop = self.offset_range(exon.start, exon.end)
        if exon_begin <= unspliced_offset <= exon_stop:
            # Offset into this exon plus all exonic sequence before it.
            return spliced_so_far + (unspliced_offset - exon_begin)
        spliced_so_far += len(exon)
    raise ValueError(
        "Couldn't find position %d on any exon of %s" % (
            position, self.id))
Convert from an absolute chromosomal position to the offset into this transcript"s spliced mRNA. Position must be inside some exon (otherwise raise exception).
Below is the the instruction that describes the task: ### Input: Convert from an absolute chromosomal position to the offset into this transcript"s spliced mRNA. Position must be inside some exon (otherwise raise exception). ### Response: def spliced_offset(self, position): """ Convert from an absolute chromosomal position to the offset into this transcript"s spliced mRNA. Position must be inside some exon (otherwise raise exception). """ # this code is performance sensitive, so switching from # typechecks.require_integer to a simpler assertion assert type(position) == int, \ "Position argument must be an integer, got %s : %s" % ( position, type(position)) if position < self.start or position > self.end: raise ValueError( "Invalid position: %d (must be between %d and %d)" % ( position, self.start, self.end)) # offset from beginning of unspliced transcript (including introns) unspliced_offset = self.offset(position) total_spliced_offset = 0 # traverse exons in order of their appearance on the strand # Since absolute positions may decrease if on the negative strand, # we instead use unspliced offsets to get always increasing indices. # # Example: # # Exon Name: exon 1 exon 2 # Spliced Offset: 123456 789... # Intron vs. Exon: ...iiiiiieeeeeeiiiiiiiiiiiiiiiieeeeeeiiiiiiiiiii... for exon in self.exons: exon_unspliced_start, exon_unspliced_end = self.offset_range( exon.start, exon.end) # If the relative position is not within this exon, keep a running # total of the total exonic length-so-far. # # Otherwise, if the relative position is within an exon, get its # offset into that exon by subtracting the exon"s relative start # position from the relative position. Add that to the total exonic # length-so-far. 
if exon_unspliced_start <= unspliced_offset <= exon_unspliced_end: # all offsets are base 0, can be used as indices into # sequence string exon_offset = unspliced_offset - exon_unspliced_start return total_spliced_offset + exon_offset else: exon_length = len(exon) # exon_end_position - exon_start_position + 1 total_spliced_offset += exon_length raise ValueError( "Couldn't find position %d on any exon of %s" % ( position, self.id))
def GET(self):  # pylint: disable=arguments-differ
    """ Display main course list page """
    welcome = self.app.welcome_page
    if not welcome:
        # No welcome page configured: redirect to the course list instead.
        raise web.seeother("/courselist")
    return self.show_page(welcome)
Display main course list page
Below is the instruction that describes the task: ### Input: Display main course list page ### Response: def GET(self): # pylint: disable=arguments-differ """ Display main course list page """ if not self.app.welcome_page: raise web.seeother("/courselist") return self.show_page(self.app.welcome_page)
def info(cwd,
         targets=None,
         user=None,
         username=None,
         password=None,
         fmt='str'):
    '''
    Display the Subversion information from the checkout.

    cwd
        The path to the Subversion repository

    targets : None
        files, directories, and URLs to pass to the command as arguments
        svn uses '.' by default

    user : None
        Run svn as a user other than what the minion runs as

    username : None
        Connect to the Subversion server as another user

    password : None
        Connect to the Subversion server with this password

        .. versionadded:: 0.17.0

    fmt : str
        How to fmt the output from info. (str, xml, list, dict)

    CLI Example:

    .. code-block:: bash

        salt '*' svn.info /path/to/svn/repo
    '''
    opts = []
    if fmt == 'xml':
        opts.append('--xml')
    if targets:
        opts.extend(salt.utils.args.shlex_split(targets))
    raw = _run_svn('info', cwd, user, username, password, opts)
    # Raw text is returned as-is for the string-ish formats.
    if fmt in ('str', 'xml'):
        return raw
    # Entries are separated by blank lines; each becomes key/value pairs.
    parsed = [_INI_RE.findall(chunk) for chunk in raw.split('\n\n')]
    if fmt == 'list':
        return parsed
    if fmt == 'dict':
        return [dict(entry) for entry in parsed]
Display the Subversion information from the checkout. cwd The path to the Subversion repository targets : None files, directories, and URLs to pass to the command as arguments svn uses '.' by default user : None Run svn as a user other than what the minion runs as username : None Connect to the Subversion server as another user password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 fmt : str How to fmt the output from info. (str, xml, list, dict) CLI Example: .. code-block:: bash salt '*' svn.info /path/to/svn/repo
Below is the the instruction that describes the task: ### Input: Display the Subversion information from the checkout. cwd The path to the Subversion repository targets : None files, directories, and URLs to pass to the command as arguments svn uses '.' by default user : None Run svn as a user other than what the minion runs as username : None Connect to the Subversion server as another user password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 fmt : str How to fmt the output from info. (str, xml, list, dict) CLI Example: .. code-block:: bash salt '*' svn.info /path/to/svn/repo ### Response: def info(cwd, targets=None, user=None, username=None, password=None, fmt='str'): ''' Display the Subversion information from the checkout. cwd The path to the Subversion repository targets : None files, directories, and URLs to pass to the command as arguments svn uses '.' by default user : None Run svn as a user other than what the minion runs as username : None Connect to the Subversion server as another user password : None Connect to the Subversion server with this password .. versionadded:: 0.17.0 fmt : str How to fmt the output from info. (str, xml, list, dict) CLI Example: .. code-block:: bash salt '*' svn.info /path/to/svn/repo ''' opts = list() if fmt == 'xml': opts.append('--xml') if targets: opts += salt.utils.args.shlex_split(targets) infos = _run_svn('info', cwd, user, username, password, opts) if fmt in ('str', 'xml'): return infos info_list = [] for infosplit in infos.split('\n\n'): info_list.append(_INI_RE.findall(infosplit)) if fmt == 'list': return info_list if fmt == 'dict': return [dict(tmp) for tmp in info_list]
def login(self):
    '''
    user login.

    Validates the submitted credentials and either sets the session
    cookie and redirects, or re-renders an error page.
    '''
    post_data = self.get_post_data()
    next_url = post_data['next'] if 'next' in post_data else '/'
    u_name = post_data['user_name']
    u_pass = post_data['user_pass']
    result = MUser.check_user_by_name(u_name, u_pass)
    # Todo: the kwd should remove from the codes.
    if result == 1:
        # Credentials OK: remember the user and record the login time.
        self.set_secure_cookie("user", u_name)
        MUser.update_time_login(u_name)
        self.redirect(next_url)
    elif result == 0:
        # Wrong password.
        self.set_status(401)
        self.render('user/user_relogin.html',
                    cfg=config.CMS_CFG,
                    kwd={
                        'info': '密码验证出错,请重新登陆。',
                        'link': '/user/login',
                    },
                    userinfo=self.userinfo)
    elif result == -1:
        # Unknown user name.
        self.set_status(401)
        self.render('misc/html/404.html',
                    cfg=config.CMS_CFG,
                    kwd={
                        'info': '没有这个用户',
                        'link': '/user/login',
                    },
                    userinfo=self.userinfo)
    else:
        self.set_status(305)
        self.redirect("{0}".format(next_url))
user login.
Below is the the instruction that describes the task: ### Input: user login. ### Response: def login(self): ''' user login. ''' post_data = self.get_post_data() if 'next' in post_data: next_url = post_data['next'] else: next_url = '/' u_name = post_data['user_name'] u_pass = post_data['user_pass'] result = MUser.check_user_by_name(u_name, u_pass) # Todo: the kwd should remove from the codes. if result == 1: self.set_secure_cookie("user", u_name) MUser.update_time_login(u_name) self.redirect(next_url) elif result == 0: self.set_status(401) self.render('user/user_relogin.html', cfg=config.CMS_CFG, kwd={ 'info': '密码验证出错,请重新登陆。', 'link': '/user/login', }, userinfo=self.userinfo) elif result == -1: self.set_status(401) self.render('misc/html/404.html', cfg=config.CMS_CFG, kwd={ 'info': '没有这个用户', 'link': '/user/login', }, userinfo=self.userinfo) else: self.set_status(305) self.redirect("{0}".format(next_url))
def _crc32(self, ch, crc): """Compute the CRC32 primitive on one byte.""" return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
Compute the CRC32 primitive on one byte.
Below is the instruction that describes the task: ### Input: Compute the CRC32 primitive on one byte. ### Response: def _crc32(self, ch, crc): """Compute the CRC32 primitive on one byte.""" return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def datatable_df(self):
    """ returns the dataframe representation of the symbol's final data """
    frame = pd.DataFrame(self._all_datatable_data())
    frame.columns = self.dt_all_cols
    return self._finish_df(frame, 'ALL')
returns the dataframe representation of the symbol's final data
Below is the instruction that describes the task: ### Input: returns the dataframe representation of the symbol's final data ### Response: def datatable_df(self): """ returns the dataframe representation of the symbol's final data """ data = self._all_datatable_data() adf = pd.DataFrame(data) adf.columns = self.dt_all_cols return self._finish_df(adf, 'ALL')
def set_chat_title(chat_id, title, **kwargs):
    """
    Use this method to change the title of a chat. Titles can't be changed for
    private chats. The bot must be an administrator in the chat for this to
    work and must have the appropriate admin rights.

    :param chat_id: Unique identifier for the target chat or username of the
                    target channel (in the format @channelusername)
    :param title: New chat title, 1-255 characters
    :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`

    :return: Returns True on success.
    :rtype: bool
    """
    # Telegram rejects titles outside 1-255 characters; fail fast locally.
    if not 1 <= len(title) <= 255:
        raise ValueError("Chat title must be 1 - 255 characters.")
    params = dict(
        chat_id=chat_id,
        title=title
    )
    return TelegramBotRPCRequest('setChatTitle', params=params,
                                 on_result=lambda result: result, **kwargs)
Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param title: New chat title, 1-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool
Below is the the instruction that describes the task: ### Input: Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param title: New chat title, 1-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool ### Response: def set_chat_title(chat_id, title, **kwargs): """ Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate admin rights. :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :param title: New chat title, 1-255 characters :param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest` :return: Returns True on success. :rtype: bool """ if len(title) > 255 or len(title) < 1: raise ValueError("Chat title must be 1 - 255 characters.") # required args params = dict( chat_id=chat_id, title=title ) return TelegramBotRPCRequest('setChatTitle', params=params, on_result=lambda result: result, **kwargs)
def parse_output(self, s):
    '''Parse avr-size "AVR Memory Usage" output into byte/percentage fields.

    Example output:

        AVR Memory Usage
        ----------------
        Device: atmega2561

        Program:    4168 bytes (1.6% Full)
        (.text + .data + .bootloader)

        Data:         72 bytes (0.9% Full)
        (.data + .bss + .noinit)
    '''
    for line in s.splitlines():
        # only the "Program:"/"Data:" summary lines contain a percentage
        if '%' not in line:
            continue
        section = line.split(':')[0].strip().lower()
        nbytes = int(line.split(':')[1].split('b')[0].strip())
        percentage = float(line.split('(')[1].split('%')[0].strip())
        if section == 'program':
            self.program_bytes = nbytes
            self.program_percentage = percentage
        else:
            self.data_bytes = nbytes
            self.data_percentage = percentage
Example output: AVR Memory Usage ---------------- Device: atmega2561 Program: 4168 bytes (1.6% Full) (.text + .data + .bootloader) Data: 72 bytes (0.9% Full) (.data + .bss + .noinit)
Below is the the instruction that describes the task: ### Input: Example output: AVR Memory Usage ---------------- Device: atmega2561 Program: 4168 bytes (1.6% Full) (.text + .data + .bootloader) Data: 72 bytes (0.9% Full) (.data + .bss + .noinit) ### Response: def parse_output(self, s): ''' Example output: AVR Memory Usage ---------------- Device: atmega2561 Program: 4168 bytes (1.6% Full) (.text + .data + .bootloader) Data: 72 bytes (0.9% Full) (.data + .bss + .noinit) ''' for x in s.splitlines(): if '%' in x: name = x.split(':')[0].strip().lower() nbytes = x.split(':')[1].split('b')[0].strip() nbytes = int(nbytes) perc = x.split('(')[1].split('%')[0].strip() perc = float(perc) if name == 'program': self.program_bytes = nbytes self.program_percentage = perc else: self.data_bytes = nbytes self.data_percentage = perc
def interruptwait():
    """
    If waituntil() has been called, this will interrupt the waiting process
    so it can check whether it should stop waiting.
    """
    # Post a synthetic application-defined event tagged with
    # LIGHTBLUE_NOTIFY_ID to wake the Cocoa run loop.
    wake_event = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
        NSApplicationDefined, NSPoint(), NSApplicationDefined, 0, 1, None,
        LIGHTBLUE_NOTIFY_ID, 0, 0)
    NSApplication.sharedApplication().postEvent_atStart_(wake_event, True)
If waituntil() has been called, this will interrupt the waiting process so it can check whether it should stop waiting.
Below is the the instruction that describes the task: ### Input: If waituntil() has been called, this will interrupt the waiting process so it can check whether it should stop waiting. ### Response: def interruptwait(): """ If waituntil() has been called, this will interrupt the waiting process so it can check whether it should stop waiting. """ evt = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(NSApplicationDefined, NSPoint(), NSApplicationDefined, 0, 1, None, LIGHTBLUE_NOTIFY_ID, 0, 0) NSApplication.sharedApplication().postEvent_atStart_(evt, True)
def expose_ancestors_or_children(self, member, collection, lang=None):
    """ Build an ancestor or descendant dict view based on selected information

    :param member: Current Member to build for
    :param collection: Collection from which we retrieved it
    :param lang: Language to express data in
    :return: Dict description of the member
    """
    view = {
        "id": member.id,
        "label": str(member.get_label(lang)),
        "model": str(member.model),
        "type": str(member.type),
        "size": member.size,
        "semantic": self.semantic(member, parent=collection)
    }
    # Resource collections additionally expose their language.
    if isinstance(member, ResourceCollection):
        view["lang"] = str(member.lang)
    return view
Build an ancestor or descendant dict view based on selected information :param member: Current Member to build for :param collection: Collection from which we retrieved it :param lang: Language to express data in :return:
Below is the the instruction that describes the task: ### Input: Build an ancestor or descendant dict view based on selected information :param member: Current Member to build for :param collection: Collection from which we retrieved it :param lang: Language to express data in :return: ### Response: def expose_ancestors_or_children(self, member, collection, lang=None): """ Build an ancestor or descendant dict view based on selected information :param member: Current Member to build for :param collection: Collection from which we retrieved it :param lang: Language to express data in :return: """ x = { "id": member.id, "label": str(member.get_label(lang)), "model": str(member.model), "type": str(member.type), "size": member.size, "semantic": self.semantic(member, parent=collection) } if isinstance(member, ResourceCollection): x["lang"] = str(member.lang) return x
def update_hard_unknown_phase_state(self): """Update in_hard_unknown_reach_phase attribute and was_in_hard_unknown_reach_phase UNKNOWN during a HARD state are not so important, and they should not raise notif about it :return: None """ self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase # We do not care about SOFT state at all # and we are sure we are no more in such a phase if self.state_type != 'HARD' or self.last_state_type != 'HARD': self.in_hard_unknown_reach_phase = False # So if we are not in already in such a phase, we check for # a start or not. So here we are sure to be in a HARD/HARD following # state if not self.in_hard_unknown_reach_phase: if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \ or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE': self.in_hard_unknown_reach_phase = True # We also backup with which state we was before enter this phase self.state_before_hard_unknown_reach_phase = self.last_state return else: # if we were already in such a phase, look for its end if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE': self.in_hard_unknown_reach_phase = False # If we just exit the phase, look if we exit with a different state # than we enter or not. If so, lie and say we were not in such phase # because we need so to raise a new notif if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase: if self.state != self.state_before_hard_unknown_reach_phase: self.was_in_hard_unknown_reach_phase = False
Update in_hard_unknown_reach_phase attribute and was_in_hard_unknown_reach_phase UNKNOWN during a HARD state are not so important, and they should not raise notif about it :return: None
Below is the the instruction that describes the task: ### Input: Update in_hard_unknown_reach_phase attribute and was_in_hard_unknown_reach_phase UNKNOWN during a HARD state are not so important, and they should not raise notif about it :return: None ### Response: def update_hard_unknown_phase_state(self): """Update in_hard_unknown_reach_phase attribute and was_in_hard_unknown_reach_phase UNKNOWN during a HARD state are not so important, and they should not raise notif about it :return: None """ self.was_in_hard_unknown_reach_phase = self.in_hard_unknown_reach_phase # We do not care about SOFT state at all # and we are sure we are no more in such a phase if self.state_type != 'HARD' or self.last_state_type != 'HARD': self.in_hard_unknown_reach_phase = False # So if we are not in already in such a phase, we check for # a start or not. So here we are sure to be in a HARD/HARD following # state if not self.in_hard_unknown_reach_phase: if self.state == 'UNKNOWN' and self.last_state != 'UNKNOWN' \ or self.state == 'UNREACHABLE' and self.last_state != 'UNREACHABLE': self.in_hard_unknown_reach_phase = True # We also backup with which state we was before enter this phase self.state_before_hard_unknown_reach_phase = self.last_state return else: # if we were already in such a phase, look for its end if self.state != 'UNKNOWN' and self.state != 'UNREACHABLE': self.in_hard_unknown_reach_phase = False # If we just exit the phase, look if we exit with a different state # than we enter or not. If so, lie and say we were not in such phase # because we need so to raise a new notif if not self.in_hard_unknown_reach_phase and self.was_in_hard_unknown_reach_phase: if self.state != self.state_before_hard_unknown_reach_phase: self.was_in_hard_unknown_reach_phase = False
def _get_table_with_column_changes(self, blueprint, table): """ Get a copy of the given table after making the column changes. :param blueprint: The blueprint :type blueprint: Blueprint :type table: orator.dbal.table.Table :rtype: orator.dbal.table.Table """ table = table.clone() for fluent in blueprint.get_changed_columns(): column = self._get_column_for_change(table, fluent) for key, value in fluent.get_attributes().items(): option = self._map_fluent_option(key) if option is not None: method = "set_%s" % option if hasattr(column, method): getattr(column, method)(self._map_fluent_value(option, value)) return table
Get a copy of the given table after making the column changes. :param blueprint: The blueprint :type blueprint: Blueprint :type table: orator.dbal.table.Table :rtype: orator.dbal.table.Table
Below is the the instruction that describes the task: ### Input: Get a copy of the given table after making the column changes. :param blueprint: The blueprint :type blueprint: Blueprint :type table: orator.dbal.table.Table :rtype: orator.dbal.table.Table ### Response: def _get_table_with_column_changes(self, blueprint, table): """ Get a copy of the given table after making the column changes. :param blueprint: The blueprint :type blueprint: Blueprint :type table: orator.dbal.table.Table :rtype: orator.dbal.table.Table """ table = table.clone() for fluent in blueprint.get_changed_columns(): column = self._get_column_for_change(table, fluent) for key, value in fluent.get_attributes().items(): option = self._map_fluent_option(key) if option is not None: method = "set_%s" % option if hasattr(column, method): getattr(column, method)(self._map_fluent_value(option, value)) return table
def uniquify(model): ''' Remove all duplicate relationships ''' seen = set() to_remove = set() for ix, (o, r, t, a) in model: hashable_link = (o, r, t) + tuple(sorted(a.items())) #print(hashable_link) if hashable_link in seen: to_remove.add(ix) seen.add(hashable_link) model.remove(to_remove) return
Remove all duplicate relationships
Below is the the instruction that describes the task: ### Input: Remove all duplicate relationships ### Response: def uniquify(model): ''' Remove all duplicate relationships ''' seen = set() to_remove = set() for ix, (o, r, t, a) in model: hashable_link = (o, r, t) + tuple(sorted(a.items())) #print(hashable_link) if hashable_link in seen: to_remove.add(ix) seen.add(hashable_link) model.remove(to_remove) return
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs): """ This method uploads a local file to the SAS servers file system. localfile - path to the local file to upload remotefile - path to remote file to create or overwrite overwrite - overwrite the output file if it exists? permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax """ valid = self._sb.file_info(remotefile, quiet = True) if valid is None: remf = remotefile else: if valid == {}: remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2] else: remf = remotefile if overwrite == False: return {'Success' : False, 'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."} try: fd = open(localfile, 'rb') except OSError as e: return {'Success' : False, 'LOG' : "File "+str(localfile)+" could not be opened. Error was: "+str(e)} fsize = os.path.getsize(localfile) if fsize > 0: code = "filename _sp_updn '"+remf+"' recfm=N permission='"+permission+"';" ll = self.submit(code, 'text') log1 = ll['LOG'] self.stdin[0].send(str(fsize).encode()+b'tom says EOL=UPLOAD \n') while True: buf = fd.read1(32768) sent = 0 send = len(buf) blen = send if blen == 0: break while send: try: sent = 0 sent = self.stdout[0].send(buf[blen-send:blen]) except (BlockingIOError): pass send -= sent code = "filename _sp_updn;" else: log1 = '' code = """ filename _sp_updn '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""'; data _null_; fid = fopen('_sp_updn', 'O'); if fid then rc = fclose(fid); run; filename _sp_updn; """ ll2 = self.submit(code, 'text') fd.close() return {'Success' : True, 'LOG' : log1+ll2['LOG']}
This method uploads a local file to the SAS servers file system. localfile - path to the local file to upload remotefile - path to remote file to create or overwrite overwrite - overwrite the output file if it exists? permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax
Below is the the instruction that describes the task: ### Input: This method uploads a local file to the SAS servers file system. localfile - path to the local file to upload remotefile - path to remote file to create or overwrite overwrite - overwrite the output file if it exists? permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax ### Response: def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs): """ This method uploads a local file to the SAS servers file system. localfile - path to the local file to upload remotefile - path to remote file to create or overwrite overwrite - overwrite the output file if it exists? permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax """ valid = self._sb.file_info(remotefile, quiet = True) if valid is None: remf = remotefile else: if valid == {}: remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2] else: remf = remotefile if overwrite == False: return {'Success' : False, 'LOG' : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."} try: fd = open(localfile, 'rb') except OSError as e: return {'Success' : False, 'LOG' : "File "+str(localfile)+" could not be opened. 
Error was: "+str(e)} fsize = os.path.getsize(localfile) if fsize > 0: code = "filename _sp_updn '"+remf+"' recfm=N permission='"+permission+"';" ll = self.submit(code, 'text') log1 = ll['LOG'] self.stdin[0].send(str(fsize).encode()+b'tom says EOL=UPLOAD \n') while True: buf = fd.read1(32768) sent = 0 send = len(buf) blen = send if blen == 0: break while send: try: sent = 0 sent = self.stdout[0].send(buf[blen-send:blen]) except (BlockingIOError): pass send -= sent code = "filename _sp_updn;" else: log1 = '' code = """ filename _sp_updn '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""'; data _null_; fid = fopen('_sp_updn', 'O'); if fid then rc = fclose(fid); run; filename _sp_updn; """ ll2 = self.submit(code, 'text') fd.close() return {'Success' : True, 'LOG' : log1+ll2['LOG']}
def models(self): """Return self.application models.""" Model_ = self.app.config['PEEWEE_MODELS_CLASS'] ignore = self.app.config['PEEWEE_MODELS_IGNORE'] models = [] if Model_ is not Model: try: mod = import_module(self.app.config['PEEWEE_MODELS_MODULE']) for model in dir(mod): models = getattr(mod, model) if not isinstance(model, pw.Model): continue models.append(models) except ImportError: return models elif isinstance(Model_, BaseSignalModel): models = BaseSignalModel.models return [m for m in models if m._meta.name not in ignore]
Return self.application models.
Below is the the instruction that describes the task: ### Input: Return self.application models. ### Response: def models(self): """Return self.application models.""" Model_ = self.app.config['PEEWEE_MODELS_CLASS'] ignore = self.app.config['PEEWEE_MODELS_IGNORE'] models = [] if Model_ is not Model: try: mod = import_module(self.app.config['PEEWEE_MODELS_MODULE']) for model in dir(mod): models = getattr(mod, model) if not isinstance(model, pw.Model): continue models.append(models) except ImportError: return models elif isinstance(Model_, BaseSignalModel): models = BaseSignalModel.models return [m for m in models if m._meta.name not in ignore]
def output(data, **kwargs): # pylint: disable=unused-argument ''' Read in the dict structure generated by the salt key API methods and print the structure. ''' color = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) strip_colors = __opts__.get('strip_colors', True) ident = 0 if __opts__.get('__multi_key'): ident = 4 if __opts__['transport'] in ('zeromq', 'tcp'): acc = 'minions' pend = 'minions_pre' den = 'minions_denied' rej = 'minions_rejected' cmap = {pend: color['RED'], acc: color['GREEN'], den: color['MAGENTA'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), den: u'{0}{1}Denied Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} else: acc = 'accepted' pend = 'pending' rej = 'rejected' cmap = {pend: color['RED'], acc: color['GREEN'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} ret = '' for status in sorted(data): ret += u'{0}\n'.format(trans[status]) for key in sorted(data[status]): key = salt.utils.data.decode(key) skey = salt.output.strip_esc_sequence(key) if strip_colors else key if isinstance(data[status], list): ret += u'{0}{1}{2}{3}\n'.format( ' ' * ident, cmap[status], skey, color['ENDC']) if isinstance(data[status], dict): ret += u'{0}{1}{2}: {3}{4}\n'.format( ' 
' * ident, cmap[status], skey, data[status][key], color['ENDC']) return ret
Read in the dict structure generated by the salt key API methods and print the structure.
Below is the the instruction that describes the task: ### Input: Read in the dict structure generated by the salt key API methods and print the structure. ### Response: def output(data, **kwargs): # pylint: disable=unused-argument ''' Read in the dict structure generated by the salt key API methods and print the structure. ''' color = salt.utils.color.get_colors( __opts__.get('color'), __opts__.get('color_theme')) strip_colors = __opts__.get('strip_colors', True) ident = 0 if __opts__.get('__multi_key'): ident = 4 if __opts__['transport'] in ('zeromq', 'tcp'): acc = 'minions' pend = 'minions_pre' den = 'minions_denied' rej = 'minions_rejected' cmap = {pend: color['RED'], acc: color['GREEN'], den: color['MAGENTA'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), den: u'{0}{1}Denied Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} else: acc = 'accepted' pend = 'pending' rej = 'rejected' cmap = {pend: color['RED'], acc: color['GREEN'], rej: color['BLUE'], 'local': color['MAGENTA']} trans = {pend: u'{0}{1}Unaccepted Keys:{2}'.format( ' ' * ident, color['LIGHT_RED'], color['ENDC']), acc: u'{0}{1}Accepted Keys:{2}'.format( ' ' * ident, color['LIGHT_GREEN'], color['ENDC']), rej: u'{0}{1}Rejected Keys:{2}'.format( ' ' * ident, color['LIGHT_BLUE'], color['ENDC']), 'local': u'{0}{1}Local Keys:{2}'.format( ' ' * ident, color['LIGHT_MAGENTA'], color['ENDC'])} ret = '' for status in sorted(data): ret += u'{0}\n'.format(trans[status]) for key in sorted(data[status]): key = salt.utils.data.decode(key) skey = salt.output.strip_esc_sequence(key) if strip_colors else key if 
isinstance(data[status], list): ret += u'{0}{1}{2}{3}\n'.format( ' ' * ident, cmap[status], skey, color['ENDC']) if isinstance(data[status], dict): ret += u'{0}{1}{2}: {3}{4}\n'.format( ' ' * ident, cmap[status], skey, data[status][key], color['ENDC']) return ret
def insert(self, tname, record=None, columns=None, astype=None): ''' Inserts record into the provided table from the database. Returns inserted record as list, str or series depending on the value of `astype`. Parameters ---------- tname : str Table to insert records into. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the inserted records. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict` `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- record : str, dict or series Inserted record. Output type depends on `astype` parameter. 
Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object ''' tname = self._check_tname(tname) record = PandasDatabase._check_dict_type(str, str, record, cast=self.auto_cast) columns = PandasDatabase._check_type_iter(str, columns) record[self._id_colname] = str(uuid.uuid4()) # If a row generation function exists for this table, use that record_new = {} if tname in self._rowgens: self._print('Using row generator to create new record in "%s"' % tname) record_new = self._rowgens[tname]() # Set as many fields as provided in new record, leave the rest as-is if record is not None: for field_key, field_val in record.items(): record_new[field_key] = field_val with self._lock: self._print('Inserting new record into "%s": %r' % (tname, record_new)) self._update_schema(tname, record_new.keys()) row = Series(record_new) self._db[tname].loc[len(self._db[tname])] = row # Save the changes to disk if required if self.auto_save: self.save() if len(columns) > 0: row = row[columns] return self._output(row, single=True, astype=astype)
Inserts record into the provided table from the database. Returns inserted record as list, str or series depending on the value of `astype`. Parameters ---------- tname : str Table to insert records into. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the inserted records. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict` `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- record : str, dict or series Inserted record. Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object
Below is the the instruction that describes the task: ### Input: Inserts record into the provided table from the database. Returns inserted record as list, str or series depending on the value of `astype`. Parameters ---------- tname : str Table to insert records into. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the inserted records. astype: str, type or None (default `None`) Type to cast the output to. Possible values are: `nonetype`, `series`, `str`, `dict` `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- record : str, dict or series Inserted record. Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object ### Response: def insert(self, tname, record=None, columns=None, astype=None): ''' Inserts record into the provided table from the database. Returns inserted record as list, str or series depending on the value of `astype`. Parameters ---------- tname : str Table to insert records into. where : dict or None (default `None`) Dictionary of <column, value> where value can be of str type for exact match or a compiled regex expression for more advanced matching. where_not : dict or None (default `None`) Identical to `where` but for negative-matching. columns: list of str, str or None (default `None`) Column(s) to return for the inserted records. astype: str, type or None (default `None`) Type to cast the output to. 
Possible values are: `nonetype`, `series`, `str`, `dict` `json`. If this is `None`, falls back to the type provided to the constructor. If a type was provided to the constructor but the user wants to avoid any casting, "nonetype" should be passed as the value. Returns ------- record : str, dict or series Inserted record. Output type depends on `astype` parameter. Examples -------- >>> db = PandasDatabase("test") >>> db.insert("test", record={"Name": "John"}) Name John __id__ dc876999-1f5b-4262-b6bf-c23b875f3a54 dtype: object ''' tname = self._check_tname(tname) record = PandasDatabase._check_dict_type(str, str, record, cast=self.auto_cast) columns = PandasDatabase._check_type_iter(str, columns) record[self._id_colname] = str(uuid.uuid4()) # If a row generation function exists for this table, use that record_new = {} if tname in self._rowgens: self._print('Using row generator to create new record in "%s"' % tname) record_new = self._rowgens[tname]() # Set as many fields as provided in new record, leave the rest as-is if record is not None: for field_key, field_val in record.items(): record_new[field_key] = field_val with self._lock: self._print('Inserting new record into "%s": %r' % (tname, record_new)) self._update_schema(tname, record_new.keys()) row = Series(record_new) self._db[tname].loc[len(self._db[tname])] = row # Save the changes to disk if required if self.auto_save: self.save() if len(columns) > 0: row = row[columns] return self._output(row, single=True, astype=astype)
def detect_blob(self, img, filters): """ "filters" must be something similar to: filters = { 'R': (150, 255), # (min, max) 'S': (150, 255), } """ acc_mask = ones(img.shape[:2], dtype=uint8) * 255 rgb = img.copy() hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) for c, (min, max) in filters.items(): img = rgb if c in 'RGB' else hsv mask = img[:, :, self.channels[c]] mask[mask < min] = 0 mask[mask > max] = 0 acc_mask &= mask kernel = ones((5, 5), uint8) acc_mask = cv2.dilate(cv2.erode(acc_mask, kernel), kernel) circles = cv2.HoughCircles(acc_mask, cv2.HOUGH_GRADIENT, 3, img.shape[0] / 5.) return circles.reshape(-1, 3) if circles is not None else []
"filters" must be something similar to: filters = { 'R': (150, 255), # (min, max) 'S': (150, 255), }
Below is the the instruction that describes the task: ### Input: "filters" must be something similar to: filters = { 'R': (150, 255), # (min, max) 'S': (150, 255), } ### Response: def detect_blob(self, img, filters): """ "filters" must be something similar to: filters = { 'R': (150, 255), # (min, max) 'S': (150, 255), } """ acc_mask = ones(img.shape[:2], dtype=uint8) * 255 rgb = img.copy() hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) for c, (min, max) in filters.items(): img = rgb if c in 'RGB' else hsv mask = img[:, :, self.channels[c]] mask[mask < min] = 0 mask[mask > max] = 0 acc_mask &= mask kernel = ones((5, 5), uint8) acc_mask = cv2.dilate(cv2.erode(acc_mask, kernel), kernel) circles = cv2.HoughCircles(acc_mask, cv2.HOUGH_GRADIENT, 3, img.shape[0] / 5.) return circles.reshape(-1, 3) if circles is not None else []
def configure(self, options, conf): """ Configures the plugin. """ super(EverestNosePlugin, self).configure(options, conf) opt_val = getattr(options, self.__dest_opt_name, None) if opt_val: self.enabled = True EverestIni.ini_file_path = opt_val
Configures the plugin.
Below is the the instruction that describes the task: ### Input: Configures the plugin. ### Response: def configure(self, options, conf): """ Configures the plugin. """ super(EverestNosePlugin, self).configure(options, conf) opt_val = getattr(options, self.__dest_opt_name, None) if opt_val: self.enabled = True EverestIni.ini_file_path = opt_val
def set_user_avatar(self, username, avatar): """Set a user's avatar. :param username: the user to set the avatar for :param avatar: ID of the avatar to set """ self._set_avatar( {'username': username}, self._get_url('user/avatar'), avatar)
Set a user's avatar. :param username: the user to set the avatar for :param avatar: ID of the avatar to set
Below is the the instruction that describes the task: ### Input: Set a user's avatar. :param username: the user to set the avatar for :param avatar: ID of the avatar to set ### Response: def set_user_avatar(self, username, avatar): """Set a user's avatar. :param username: the user to set the avatar for :param avatar: ID of the avatar to set """ self._set_avatar( {'username': username}, self._get_url('user/avatar'), avatar)
def poller_tasker_handler(event, context): # pylint: disable=W0613 """ Historical VPC Poller Tasker. The Poller is run at a set interval in order to ensure that changes do not go undetected by Historical. Historical pollers generate `polling events` which simulate changes. These polling events contain configuration data such as the account/region defining where the collector should attempt to gather data from. This is the entry point. This will task subsequent Poller lambdas to list all of a given resource in a select few AWS accounts. """ LOG.debug('[@] Running Poller Tasker...') queue_url = get_queue_url(os.environ.get('POLLER_TASKER_QUEUE_NAME', 'HistoricalVPCPollerTasker')) poller_task_schema = HistoricalPollerTaskEventModel() events = [] for account in get_historical_accounts(): for region in POLL_REGIONS: events.append(poller_task_schema.serialize_me(account['id'], region)) try: produce_events(events, queue_url, randomize_delay=RANDOMIZE_POLLER) except ClientError as exc: LOG.error(f'[X] Unable to generate poller tasker events! Reason: {exc}') LOG.debug('[@] Finished tasking the pollers.')
Historical VPC Poller Tasker. The Poller is run at a set interval in order to ensure that changes do not go undetected by Historical. Historical pollers generate `polling events` which simulate changes. These polling events contain configuration data such as the account/region defining where the collector should attempt to gather data from. This is the entry point. This will task subsequent Poller lambdas to list all of a given resource in a select few AWS accounts.
Below is the the instruction that describes the task: ### Input: Historical VPC Poller Tasker. The Poller is run at a set interval in order to ensure that changes do not go undetected by Historical. Historical pollers generate `polling events` which simulate changes. These polling events contain configuration data such as the account/region defining where the collector should attempt to gather data from. This is the entry point. This will task subsequent Poller lambdas to list all of a given resource in a select few AWS accounts. ### Response: def poller_tasker_handler(event, context): # pylint: disable=W0613 """ Historical VPC Poller Tasker. The Poller is run at a set interval in order to ensure that changes do not go undetected by Historical. Historical pollers generate `polling events` which simulate changes. These polling events contain configuration data such as the account/region defining where the collector should attempt to gather data from. This is the entry point. This will task subsequent Poller lambdas to list all of a given resource in a select few AWS accounts. """ LOG.debug('[@] Running Poller Tasker...') queue_url = get_queue_url(os.environ.get('POLLER_TASKER_QUEUE_NAME', 'HistoricalVPCPollerTasker')) poller_task_schema = HistoricalPollerTaskEventModel() events = [] for account in get_historical_accounts(): for region in POLL_REGIONS: events.append(poller_task_schema.serialize_me(account['id'], region)) try: produce_events(events, queue_url, randomize_delay=RANDOMIZE_POLLER) except ClientError as exc: LOG.error(f'[X] Unable to generate poller tasker events! Reason: {exc}') LOG.debug('[@] Finished tasking the pollers.')
def next(self): """Returns next error checking strategy.""" # Where this link is in the chain: location = self.chain.index(self) if not self.end(): return self.chain[location + 1]
Returns next error checking strategy.
Below is the the instruction that describes the task: ### Input: Returns next error checking strategy. ### Response: def next(self): """Returns next error checking strategy.""" # Where this link is in the chain: location = self.chain.index(self) if not self.end(): return self.chain[location + 1]
def tag_add(self, item, tag): """ Add tag to the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str """ tags = self.item(item, "tags") self.item(item, tags=tags + (tag,))
Add tag to the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str
Below is the the instruction that describes the task: ### Input: Add tag to the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str ### Response: def tag_add(self, item, tag): """ Add tag to the tags of item. :param item: item identifier :type item: str :param tag: tag name :type tag: str """ tags = self.item(item, "tags") self.item(item, tags=tags + (tag,))
def finish_commit(self, commit): """ Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit.
Below is the the instruction that describes the task: ### Input: Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. ### Response: def finish_commit(self, commit): """ Ends the process of committing data to a Repo and persists the Commit. Once a Commit is finished the data becomes immutable and future attempts to write to it with PutFile will error. Params: * commit: A tuple, string, or Commit object representing the commit. """ req = proto.FinishCommitRequest(commit=commit_from(commit)) res = self.stub.FinishCommit(req, metadata=self.metadata) return res
def set_args(self, **kwargs): """ Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value """ try: kwargs_items = kwargs.iteritems() except AttributeError: kwargs_items = kwargs.items() for key, val in kwargs_items: self.args[key] = val
Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value
Below is the the instruction that describes the task: ### Input: Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value ### Response: def set_args(self, **kwargs): """ Set more arguments to self.args args: **kwargs: key and value represents dictionary key and value """ try: kwargs_items = kwargs.iteritems() except AttributeError: kwargs_items = kwargs.items() for key, val in kwargs_items: self.args[key] = val
def heartbeat_callback(self, session=None): """Self destruct task if state has been moved away from running externally""" if self.terminating: # ensure termination if processes are created later self.task_runner.terminate() return self.task_instance.refresh_from_db() ti = self.task_instance fqdn = get_hostname() same_hostname = fqdn == ti.hostname same_process = ti.pid == os.getpid() if ti.state == State.RUNNING: if not same_hostname: self.log.warning("The recorded hostname %s " "does not match this instance's hostname " "%s", ti.hostname, fqdn) raise AirflowException("Hostname of job runner does not match") elif not same_process: current_pid = os.getpid() self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid) raise AirflowException("PID of job runner does not match") elif ( self.task_runner.return_code() is None and hasattr(self.task_runner, 'process') ): self.log.warning( "State of this instance has been externally set to %s. " "Taking the poison pill.", ti.state ) self.task_runner.terminate() self.terminating = True
Self destruct task if state has been moved away from running externally
Below is the the instruction that describes the task: ### Input: Self destruct task if state has been moved away from running externally ### Response: def heartbeat_callback(self, session=None): """Self destruct task if state has been moved away from running externally""" if self.terminating: # ensure termination if processes are created later self.task_runner.terminate() return self.task_instance.refresh_from_db() ti = self.task_instance fqdn = get_hostname() same_hostname = fqdn == ti.hostname same_process = ti.pid == os.getpid() if ti.state == State.RUNNING: if not same_hostname: self.log.warning("The recorded hostname %s " "does not match this instance's hostname " "%s", ti.hostname, fqdn) raise AirflowException("Hostname of job runner does not match") elif not same_process: current_pid = os.getpid() self.log.warning("Recorded pid %s does not match " "the current pid %s", ti.pid, current_pid) raise AirflowException("PID of job runner does not match") elif ( self.task_runner.return_code() is None and hasattr(self.task_runner, 'process') ): self.log.warning( "State of this instance has been externally set to %s. " "Taking the poison pill.", ti.state ) self.task_runner.terminate() self.terminating = True
def downsample_with_averaging(array, factor): """Downsample x by factor using averaging. @return: The downsampled array, of the same type as x. """ factor = tuple(factor) output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor)) temp = np.zeros(output_shape, dtype=np.float32) counts = np.zeros(output_shape, np.int) for offset in np.ndindex(factor): part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] indexing_expr = tuple(np.s_[:s] for s in part.shape) temp[indexing_expr] += part counts[indexing_expr] += 1 return np.cast[array.dtype](temp / counts)
Downsample x by factor using averaging. @return: The downsampled array, of the same type as x.
Below is the the instruction that describes the task: ### Input: Downsample x by factor using averaging. @return: The downsampled array, of the same type as x. ### Response: def downsample_with_averaging(array, factor): """Downsample x by factor using averaging. @return: The downsampled array, of the same type as x. """ factor = tuple(factor) output_shape = tuple(int(math.ceil(s / f)) for s, f in zip(array.shape, factor)) temp = np.zeros(output_shape, dtype=np.float32) counts = np.zeros(output_shape, np.int) for offset in np.ndindex(factor): part = array[tuple(np.s_[o::f] for o, f in zip(offset, factor))] indexing_expr = tuple(np.s_[:s] for s in part.shape) temp[indexing_expr] += part counts[indexing_expr] += 1 return np.cast[array.dtype](temp / counts)
def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
Normalize the encoding name, replace ASCII w/ UTF-8.
Below is the the instruction that describes the task: ### Input: Normalize the encoding name, replace ASCII w/ UTF-8. ### Response: def normalize_encoding(encoding, default=DEFAULT_ENCODING): """Normalize the encoding name, replace ASCII w/ UTF-8.""" if encoding is None: return default encoding = encoding.lower().strip() if encoding in ['', 'ascii']: return default try: codecs.lookup(encoding) return encoding except LookupError: return default
def getSCDPURL(self, serviceType, default=None): """Returns the SCDP (Service Control Protocol Document) URL for a given service type. When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the interaction functionality a device provides. :param serviceType: the service type to look up for :param default: the default return value in case the service type is not found and device definitions are not loaded :type default: str or None :return: the URL/URI :rtype: str or None :raises ValueError: if the device did load device definitions and the service type is not known. .. seealso:: :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` """ if serviceType in self.__deviceServiceDefinitions.keys(): return self.__deviceServiceDefinitions[serviceType]["scpdURL"] # check if definitions have been loaded, then dont return the default if self.__deviceXMLInitialized: raise ValueError("Device do not support given serviceType: " + serviceType) return default
Returns the SCDP (Service Control Protocol Document) URL for a given service type. When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the interaction functionality a device provides. :param serviceType: the service type to look up for :param default: the default return value in case the service type is not found and device definitions are not loaded :type default: str or None :return: the URL/URI :rtype: str or None :raises ValueError: if the device did load device definitions and the service type is not known. .. seealso:: :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions`
Below is the the instruction that describes the task: ### Input: Returns the SCDP (Service Control Protocol Document) URL for a given service type. When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the interaction functionality a device provides. :param serviceType: the service type to look up for :param default: the default return value in case the service type is not found and device definitions are not loaded :type default: str or None :return: the URL/URI :rtype: str or None :raises ValueError: if the device did load device definitions and the service type is not known. .. seealso:: :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` ### Response: def getSCDPURL(self, serviceType, default=None): """Returns the SCDP (Service Control Protocol Document) URL for a given service type. When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the interaction functionality a device provides. :param serviceType: the service type to look up for :param default: the default return value in case the service type is not found and device definitions are not loaded :type default: str or None :return: the URL/URI :rtype: str or None :raises ValueError: if the device did load device definitions and the service type is not known. .. 
seealso:: :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` """ if serviceType in self.__deviceServiceDefinitions.keys(): return self.__deviceServiceDefinitions[serviceType]["scpdURL"] # check if definitions have been loaded, then dont return the default if self.__deviceXMLInitialized: raise ValueError("Device do not support given serviceType: " + serviceType) return default
def create_app(): """ Flask application factory """ # Create Flask app load app.config app = Flask(__name__) app.config.from_object(__name__+'.ConfigClass') # Initialize Flask-SQLAlchemy db = SQLAlchemy(app) # Define the User data-model. # NB: Make sure to add flask_user UserMixin !!! class User(db.Model, UserMixin): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1') # User authentication information. The collation='NOCASE' is required # to search case insensitively when USER_IFIND_MODE is 'nocase_collation'. username = db.Column(db.String(100, collation='NOCASE'), nullable=False, unique=True) password = db.Column(db.String(255), nullable=False, server_default='') email_confirmed_at = db.Column(db.DateTime()) # User information first_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='') last_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='') # Create all database tables db.create_all() # Setup Flask-User and specify the User data-model user_manager = UserManager(app, db, User) # The Home page is accessible to anyone @app.route('/') def home_page(): # String-based templates return render_template_string(""" {% extends "flask_user_layout.html" %} {% block content %} <h2>Home page</h2> <p><a href={{ url_for('user.register') }}>Register</a></p> <p><a href={{ url_for('user.login') }}>Sign in</a></p> <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p> <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p> <p><a href={{ url_for('user.logout') }}>Sign out</a></p> {% endblock %} """) # The Members page is only accessible to authenticated users via the @login_required decorator @app.route('/members') @login_required # User must be authenticated def member_page(): # String-based templates return render_template_string(""" {% extends "flask_user_layout.html" %} 
{% block content %} <h2>Members page</h2> <p><a href={{ url_for('user.register') }}>Register</a></p> <p><a href={{ url_for('user.login') }}>Sign in</a></p> <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p> <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p> <p><a href={{ url_for('user.logout') }}>Sign out</a></p> {% endblock %} """) return app
Flask application factory
Below is the the instruction that describes the task: ### Input: Flask application factory ### Response: def create_app(): """ Flask application factory """ # Create Flask app load app.config app = Flask(__name__) app.config.from_object(__name__+'.ConfigClass') # Initialize Flask-SQLAlchemy db = SQLAlchemy(app) # Define the User data-model. # NB: Make sure to add flask_user UserMixin !!! class User(db.Model, UserMixin): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) active = db.Column('is_active', db.Boolean(), nullable=False, server_default='1') # User authentication information. The collation='NOCASE' is required # to search case insensitively when USER_IFIND_MODE is 'nocase_collation'. username = db.Column(db.String(100, collation='NOCASE'), nullable=False, unique=True) password = db.Column(db.String(255), nullable=False, server_default='') email_confirmed_at = db.Column(db.DateTime()) # User information first_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='') last_name = db.Column(db.String(100, collation='NOCASE'), nullable=False, server_default='') # Create all database tables db.create_all() # Setup Flask-User and specify the User data-model user_manager = UserManager(app, db, User) # The Home page is accessible to anyone @app.route('/') def home_page(): # String-based templates return render_template_string(""" {% extends "flask_user_layout.html" %} {% block content %} <h2>Home page</h2> <p><a href={{ url_for('user.register') }}>Register</a></p> <p><a href={{ url_for('user.login') }}>Sign in</a></p> <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p> <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p> <p><a href={{ url_for('user.logout') }}>Sign out</a></p> {% endblock %} """) # The Members page is only accessible to authenticated users via the @login_required decorator @app.route('/members') @login_required # User must be authenticated def 
member_page(): # String-based templates return render_template_string(""" {% extends "flask_user_layout.html" %} {% block content %} <h2>Members page</h2> <p><a href={{ url_for('user.register') }}>Register</a></p> <p><a href={{ url_for('user.login') }}>Sign in</a></p> <p><a href={{ url_for('home_page') }}>Home page</a> (accessible to anyone)</p> <p><a href={{ url_for('member_page') }}>Member page</a> (login required)</p> <p><a href={{ url_for('user.logout') }}>Sign out</a></p> {% endblock %} """) return app
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) x_vf = _get_min_distance_to_volcanic_front(sites.lon, sites.lat) mean = _apply_volcanic_front_correction(mean, x_vf, rup.hypo_depth, imt) return mean, stddevs
Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
Below is the the instruction that describes the task: ### Input: Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. ### Response: def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-1 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ mean, stddevs = super().get_mean_and_stddevs( sites, rup, dists, imt, stddev_types) x_vf = _get_min_distance_to_volcanic_front(sites.lon, sites.lat) mean = _apply_volcanic_front_correction(mean, x_vf, rup.hypo_depth, imt) return mean, stddevs
def fasta_from_biom(table, fasta_file_name): '''Save sequences from a biom table to a fasta file Parameters ---------- table : biom.Table The biom table containing the sequences fasta_file_name : str Name of the fasta output file ''' logger = logging.getLogger(__name__) logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name) with open(fasta_file_name, 'w') as f: for cseq in table.ids(axis='observation'): f.write('>%s\n%s\n' % (cseq, cseq)) logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
Save sequences from a biom table to a fasta file Parameters ---------- table : biom.Table The biom table containing the sequences fasta_file_name : str Name of the fasta output file
Below is the the instruction that describes the task: ### Input: Save sequences from a biom table to a fasta file Parameters ---------- table : biom.Table The biom table containing the sequences fasta_file_name : str Name of the fasta output file ### Response: def fasta_from_biom(table, fasta_file_name): '''Save sequences from a biom table to a fasta file Parameters ---------- table : biom.Table The biom table containing the sequences fasta_file_name : str Name of the fasta output file ''' logger = logging.getLogger(__name__) logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name) with open(fasta_file_name, 'w') as f: for cseq in table.ids(axis='observation'): f.write('>%s\n%s\n' % (cseq, cseq)) logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def make_idx(f, lb, ub): """ This is a little utility function to replace an oft-called set of operations Parameters ---------- f : 1d array A frequency axis along which we want to slice lb : float Defines the upper bound of slicing ub : float Defines the lower bound of slicing Returns ------- idx : a slice object """ idx0 = np.argmin(np.abs(f - lb)) idx1 = np.argmin(np.abs(f - ub)) # Determine which should be on which side if f[0] > f[1]: direction = -1 else: direction = 1 if direction == -1: idx = slice(idx1, idx0) elif direction == 1: idx = slice(idx0, idx1) return idx
This is a little utility function to replace an oft-called set of operations Parameters ---------- f : 1d array A frequency axis along which we want to slice lb : float Defines the upper bound of slicing ub : float Defines the lower bound of slicing Returns ------- idx : a slice object
Below is the the instruction that describes the task: ### Input: This is a little utility function to replace an oft-called set of operations Parameters ---------- f : 1d array A frequency axis along which we want to slice lb : float Defines the upper bound of slicing ub : float Defines the lower bound of slicing Returns ------- idx : a slice object ### Response: def make_idx(f, lb, ub): """ This is a little utility function to replace an oft-called set of operations Parameters ---------- f : 1d array A frequency axis along which we want to slice lb : float Defines the upper bound of slicing ub : float Defines the lower bound of slicing Returns ------- idx : a slice object """ idx0 = np.argmin(np.abs(f - lb)) idx1 = np.argmin(np.abs(f - ub)) # Determine which should be on which side if f[0] > f[1]: direction = -1 else: direction = 1 if direction == -1: idx = slice(idx1, idx0) elif direction == 1: idx = slice(idx0, idx1) return idx
def add_for_targets(self, targets, classpath_elements): """Adds classpath path elements to the products of all the provided targets.""" for target in targets: self.add_for_target(target, classpath_elements)
Adds classpath path elements to the products of all the provided targets.
Below is the the instruction that describes the task: ### Input: Adds classpath path elements to the products of all the provided targets. ### Response: def add_for_targets(self, targets, classpath_elements): """Adds classpath path elements to the products of all the provided targets.""" for target in targets: self.add_for_target(target, classpath_elements)
def set_image(self, text): """ Save image resource at `text` (path or url) to storage, then return the replacement string and the necessary exercicse image file object. Args: - text (str): path or url to parse as an exercise image resource Returns: (new_text, files) - `new_text` (str): replacement string for the original `text` string - `files` (list): list of files that were downloaded from `text` """ # Make sure `text` hasn't already been processed if exercises.CONTENT_STORAGE_PLACEHOLDER in text: return text, [] # Strip `text` of whitespace stripped_text = text.strip().replace('\\n', '') # If `stripped_text` is a web+graphie: path, we need special processing graphie_regex = re.compile(WEB_GRAPHIE_URL_REGEX, flags=re.IGNORECASE) graphie_match = graphie_regex.match(stripped_text) if graphie_match: is_web_plus_graphie = True graphie_rawpath = graphie_match.groupdict()['rawpath'] graphie_path = graphie_rawpath.replace("//", "https://") exercise_image_file = _ExerciseGraphieFile(graphie_path) elif get_base64_encoding(stripped_text): is_web_plus_graphie = False exercise_image_file = _ExerciseBase64ImageFile(stripped_text) else: is_web_plus_graphie = False exercise_image_file = _ExerciseImageFile(stripped_text) # Setup link to assessment item exercise_image_file.assessment_item = self # Process file to make the replacement_str available _filename = exercise_image_file.process_file() # Get `new_text` = the replacement path for the image resource new_text = exercises.CONTENT_STORAGE_FORMAT.format(exercise_image_file.get_replacement_str()) if is_web_plus_graphie: # need to put back the `web+graphie:` prefix new_text = "web+graphie:" + new_text return new_text, [exercise_image_file]
Save image resource at `text` (path or url) to storage, then return the replacement string and the necessary exercicse image file object. Args: - text (str): path or url to parse as an exercise image resource Returns: (new_text, files) - `new_text` (str): replacement string for the original `text` string - `files` (list): list of files that were downloaded from `text`
Below is the the instruction that describes the task: ### Input: Save image resource at `text` (path or url) to storage, then return the replacement string and the necessary exercicse image file object. Args: - text (str): path or url to parse as an exercise image resource Returns: (new_text, files) - `new_text` (str): replacement string for the original `text` string - `files` (list): list of files that were downloaded from `text` ### Response: def set_image(self, text): """ Save image resource at `text` (path or url) to storage, then return the replacement string and the necessary exercicse image file object. Args: - text (str): path or url to parse as an exercise image resource Returns: (new_text, files) - `new_text` (str): replacement string for the original `text` string - `files` (list): list of files that were downloaded from `text` """ # Make sure `text` hasn't already been processed if exercises.CONTENT_STORAGE_PLACEHOLDER in text: return text, [] # Strip `text` of whitespace stripped_text = text.strip().replace('\\n', '') # If `stripped_text` is a web+graphie: path, we need special processing graphie_regex = re.compile(WEB_GRAPHIE_URL_REGEX, flags=re.IGNORECASE) graphie_match = graphie_regex.match(stripped_text) if graphie_match: is_web_plus_graphie = True graphie_rawpath = graphie_match.groupdict()['rawpath'] graphie_path = graphie_rawpath.replace("//", "https://") exercise_image_file = _ExerciseGraphieFile(graphie_path) elif get_base64_encoding(stripped_text): is_web_plus_graphie = False exercise_image_file = _ExerciseBase64ImageFile(stripped_text) else: is_web_plus_graphie = False exercise_image_file = _ExerciseImageFile(stripped_text) # Setup link to assessment item exercise_image_file.assessment_item = self # Process file to make the replacement_str available _filename = exercise_image_file.process_file() # Get `new_text` = the replacement path for the image resource new_text = 
exercises.CONTENT_STORAGE_FORMAT.format(exercise_image_file.get_replacement_str()) if is_web_plus_graphie: # need to put back the `web+graphie:` prefix new_text = "web+graphie:" + new_text return new_text, [exercise_image_file]
def get_genus_type(self): """Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object *compliance: mandatory -- This method must be implemented.* """ try: # Try to stand up full Type objects if they can be found # (Also need to LOOK FOR THE TYPE IN types or through type lookup) genus_type_identifier = Id(self._my_map['genusTypeId']).get_identifier() return Type(**types.Genus().get_type_data(genus_type_identifier)) except: # If that doesn't work, return the id only type, still useful for comparison. return Type(idstr=self._my_map['genusTypeId'])
Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object *compliance: mandatory -- This method must be implemented.* ### Response: def get_genus_type(self): """Gets the genus type of this object. return: (osid.type.Type) - the genus type of this object *compliance: mandatory -- This method must be implemented.* """ try: # Try to stand up full Type objects if they can be found # (Also need to LOOK FOR THE TYPE IN types or through type lookup) genus_type_identifier = Id(self._my_map['genusTypeId']).get_identifier() return Type(**types.Genus().get_type_data(genus_type_identifier)) except: # If that doesn't work, return the id only type, still useful for comparison. return Type(idstr=self._my_map['genusTypeId'])
def digest(self): """Return final digest value. """ if self._digest is None: if self._buf: self._add_block(self._buf) self._buf = EMPTY ctx = self._blake2s(0, 1, True) for t in self._thread: ctx.update(t.digest()) self._digest = ctx.digest() return self._digest
Return final digest value.
Below is the the instruction that describes the task: ### Input: Return final digest value. ### Response: def digest(self): """Return final digest value. """ if self._digest is None: if self._buf: self._add_block(self._buf) self._buf = EMPTY ctx = self._blake2s(0, 1, True) for t in self._thread: ctx.update(t.digest()) self._digest = ctx.digest() return self._digest
def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value
See Schedule.value
Below is the the instruction that describes the task: ### Input: See Schedule.value ### Response: def value(self, t): """See Schedule.value""" for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]): if l_t <= t and t < r_t: alpha = float(t - l_t) / (r_t - l_t) return self._interpolation(l, r, alpha) # t does not belong to any of the pieces, so doom. assert self._outside_value is not None return self._outside_value
def lower(self, lowering): """Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case """ old_shape = self.inputs[0].shape new_shape = self.outputs[0].shape mesh_impl = lowering.mesh_impl(self) slices = lowering.tensors[self.inputs[0]] mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape) mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape) # Figure out what needs to be done for different mesh-axes mesh_axes_allsplit = [] mesh_axes_allconcat = [] mesh_axes_alltoall = [] for mesh_axis, (old_cumprod, new_cumprod) in enumerate( zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)): if new_cumprod != old_cumprod: if old_cumprod is None: # split in new layout but not in old layout - we need an allsplit mesh_axes_allsplit.append(mesh_axis) elif new_cumprod is None: # split in old layout but not in new layout - we need an allconcat mesh_axes_allconcat.append(mesh_axis) else: # split differently in old and new layouts - we need an alltoall mesh_axes_alltoall.append(mesh_axis) laid_out_size = mesh_impl.laid_out_size(old_shape) for mesh_axis in mesh_axes_allsplit: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. 
input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis) laid_out_size //= mesh_impl.shape[mesh_axis].size for mesh_axis in mesh_axes_alltoall: split_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if split_tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) concat_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert concat_tensor_axis is not None slices = mesh_impl.alltoall( slices, mesh_axis, split_tensor_axis, concat_tensor_axis) lowering.add_counter( "alltoall/%s/reshape_op" % mesh_axis, laid_out_size) for mesh_axis in mesh_axes_allconcat: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert tensor_axis is not None slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis) laid_out_size *= mesh_impl.shape[mesh_axis].size lowering.add_counter( "allconcat/%s/reshape_op" % mesh_axis, laid_out_size) # now reshape the slices old_slice_shape = mesh_impl.slice_shape(old_shape) new_slice_shape = mesh_impl.slice_shape(new_shape) if new_slice_shape != old_slice_shape: def reshape_fn(x): return tf.reshape(x, new_slice_shape) slices = mesh_impl.slicewise(reshape_fn, slices) lowering.set_tensor_lowering(self.outputs[0], slices)
Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case
Below is the the instruction that describes the task: ### Input: Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case ### Response: def lower(self, lowering): """Lower the ReshapeOperation. Reshaping can require collective communication between processors. We haven't yet implemented all possible reshapes. We try to handle the common cases here - otherwise we raise a NotImplementedError. Args: lowering: a Lowering Raises: NotImplementedError: if we haven't covered this case """ old_shape = self.inputs[0].shape new_shape = self.outputs[0].shape mesh_impl = lowering.mesh_impl(self) slices = lowering.tensors[self.inputs[0]] mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape) mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape) # Figure out what needs to be done for different mesh-axes mesh_axes_allsplit = [] mesh_axes_allconcat = [] mesh_axes_alltoall = [] for mesh_axis, (old_cumprod, new_cumprod) in enumerate( zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)): if new_cumprod != old_cumprod: if old_cumprod is None: # split in new layout but not in old layout - we need an allsplit mesh_axes_allsplit.append(mesh_axis) elif new_cumprod is None: # split in old layout but not in new layout - we need an allconcat mesh_axes_allconcat.append(mesh_axis) else: # split differently in old and new layouts - we need an alltoall mesh_axes_alltoall.append(mesh_axis) laid_out_size = mesh_impl.laid_out_size(old_shape) for mesh_axis in mesh_axes_allsplit: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then 
changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis) laid_out_size //= mesh_impl.shape[mesh_axis].size for mesh_axis in mesh_axes_alltoall: split_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_new[mesh_axis]) if split_tensor_axis is None: # TODO(noam): try to handle this case raise NotImplementedError( "Try first reshaping to insert a new tf dimension," " then changing layout. input_shape=%s output_shape=%s" % (self.inputs[0].shape, self.outputs[0].shape)) concat_tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert concat_tensor_axis is not None slices = mesh_impl.alltoall( slices, mesh_axis, split_tensor_axis, concat_tensor_axis) lowering.add_counter( "alltoall/%s/reshape_op" % mesh_axis, laid_out_size) for mesh_axis in mesh_axes_allconcat: tensor_axis = old_shape.cumprod_to_tensor_axis( mesh_axis_to_cumprod_old[mesh_axis]) assert tensor_axis is not None slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis) laid_out_size *= mesh_impl.shape[mesh_axis].size lowering.add_counter( "allconcat/%s/reshape_op" % mesh_axis, laid_out_size) # now reshape the slices old_slice_shape = mesh_impl.slice_shape(old_shape) new_slice_shape = mesh_impl.slice_shape(new_shape) if new_slice_shape != old_slice_shape: def reshape_fn(x): return tf.reshape(x, new_slice_shape) slices = mesh_impl.slicewise(reshape_fn, slices) lowering.set_tensor_lowering(self.outputs[0], slices)
def draw_layer(ax, layer):
    """Render a single layer onto a matplotlib axis.

    The axis is forced to an equal aspect ratio (data limits preserved),
    the layer's coordinate sequences are plotted, and the axis
    decorations are then hidden.

    Args:
        ax (axis): the matplotlib axis to render into
        layer (layer): the coordinate sequences to plot
    """
    ax.set_aspect('equal', 'datalim')
    ax.plot(*layer)
    ax.axis('off')
Draws a layer on the given matplotlib axis. Args: ax (axis): the matplotlib axis to draw on layer (layer): the layers to plot
Below is the instruction that describes the task: ### Input: Draws a layer on the given matplotlib axis. Args: ax (axis): the matplotlib axis to draw on layer (layer): the layers to plot ### Response: def draw_layer(ax, layer): """Draws a layer on the given matplotlib axis. Args: ax (axis): the matplotlib axis to draw on layer (layer): the layers to plot """ ax.set_aspect('equal', 'datalim') ax.plot(*layer) ax.axis('off')
def tke(u, v, w, perturbation=False, axis=-1):
    r"""Compute turbulence kinetic energy from velocity time series.

    Parameters
    ----------
    u : array_like
        The wind component along the x-axis
    v : array_like
        The wind component along the y-axis
    w : array_like
        The wind component along the z-axis
    perturbation : {False, True}, optional
        True if the inputs are already perturbation velocities.  If False
        (default), the mean of each component is removed first.

    Returns
    -------
    array_like
        The corresponding turbulence kinetic energy value

    Other Parameters
    ----------------
    axis : int
        The index of the time axis. Default is -1

    See Also
    --------
    get_perturbation : Used to compute perturbations if `perturbation` is False.

    Notes
    -----
    Turbulence Kinetic Energy is computed as:

    .. math:: e = 0.5 \sqrt{\overline{u^{\prime2}} +
              \overline{v^{\prime2}} + \overline{w^{\prime2}}},

    where the primed quantities are perturbation velocities.  For more
    information on the subject, please see [Garratt1994]_.
    """
    if not perturbation:
        u, v, w = (get_perturbation(comp, axis=axis) for comp in (u, v, w))

    # Sum of the mean squared perturbation components, then the TKE formula.
    mean_square = sum(np.mean(comp * comp, axis=axis) for comp in (u, v, w))
    return 0.5 * np.sqrt(mean_square)
r"""Compute turbulence kinetic energy. Compute the turbulence kinetic energy (e) from the time series of the velocity components. Parameters ---------- u : array_like The wind component along the x-axis v : array_like The wind component along the y-axis w : array_like The wind component along the z-axis perturbation : {False, True}, optional True if the `u`, `v`, and `w` components of wind speed supplied to the function are perturbation velocities. If False, perturbation velocities will be calculated by removing the mean value from each component. Returns ------- array_like The corresponding turbulence kinetic energy value Other Parameters ---------------- axis : int The index of the time axis. Default is -1 See Also -------- get_perturbation : Used to compute perturbations if `perturbation` is False. Notes ----- Turbulence Kinetic Energy is computed as: .. math:: e = 0.5 \sqrt{\overline{u^{\prime2}} + \overline{v^{\prime2}} + \overline{w^{\prime2}}}, where the velocity components .. math:: u^{\prime}, v^{\prime}, u^{\prime} are perturbation velocities. For more information on the subject, please see [Garratt1994]_.
Below is the the instruction that describes the task: ### Input: r"""Compute turbulence kinetic energy. Compute the turbulence kinetic energy (e) from the time series of the velocity components. Parameters ---------- u : array_like The wind component along the x-axis v : array_like The wind component along the y-axis w : array_like The wind component along the z-axis perturbation : {False, True}, optional True if the `u`, `v`, and `w` components of wind speed supplied to the function are perturbation velocities. If False, perturbation velocities will be calculated by removing the mean value from each component. Returns ------- array_like The corresponding turbulence kinetic energy value Other Parameters ---------------- axis : int The index of the time axis. Default is -1 See Also -------- get_perturbation : Used to compute perturbations if `perturbation` is False. Notes ----- Turbulence Kinetic Energy is computed as: .. math:: e = 0.5 \sqrt{\overline{u^{\prime2}} + \overline{v^{\prime2}} + \overline{w^{\prime2}}}, where the velocity components .. math:: u^{\prime}, v^{\prime}, u^{\prime} are perturbation velocities. For more information on the subject, please see [Garratt1994]_. ### Response: def tke(u, v, w, perturbation=False, axis=-1): r"""Compute turbulence kinetic energy. Compute the turbulence kinetic energy (e) from the time series of the velocity components. Parameters ---------- u : array_like The wind component along the x-axis v : array_like The wind component along the y-axis w : array_like The wind component along the z-axis perturbation : {False, True}, optional True if the `u`, `v`, and `w` components of wind speed supplied to the function are perturbation velocities. If False, perturbation velocities will be calculated by removing the mean value from each component. Returns ------- array_like The corresponding turbulence kinetic energy value Other Parameters ---------------- axis : int The index of the time axis. 
Default is -1 See Also -------- get_perturbation : Used to compute perturbations if `perturbation` is False. Notes ----- Turbulence Kinetic Energy is computed as: .. math:: e = 0.5 \sqrt{\overline{u^{\prime2}} + \overline{v^{\prime2}} + \overline{w^{\prime2}}}, where the velocity components .. math:: u^{\prime}, v^{\prime}, u^{\prime} are perturbation velocities. For more information on the subject, please see [Garratt1994]_. """ if not perturbation: u = get_perturbation(u, axis=axis) v = get_perturbation(v, axis=axis) w = get_perturbation(w, axis=axis) u_cont = np.mean(u * u, axis=axis) v_cont = np.mean(v * v, axis=axis) w_cont = np.mean(w * w, axis=axis) return 0.5 * np.sqrt(u_cont + v_cont + w_cont)
def config(self, show_row_hdrs=True, show_col_hdrs=True,
           show_col_hdr_in_cell=False, auto_resize=True):
    """
    Override the in-class params:
    @param show_row_hdrs : show row headers
    @param show_col_hdrs : show column headers
    @param show_col_hdr_in_cell : embed column header in each cell
    @param auto_resize : auto resize according to the size of terminal
    """
    self.show_row_hdrs = show_row_hdrs
    self.show_col_hdrs = show_col_hdrs
    self.show_col_hdr_in_cell = show_col_hdr_in_cell
    # Bug fix: ``auto_resize`` was accepted and documented but never
    # stored, so callers could not actually change it.
    self.auto_resize = auto_resize
Override the in-class params: @param show_row_hdrs : show row headers @param show_col_hdrs : show column headers @param show_col_hdr_in_cell : embed column header in each cell @param auto_resize : auto resize according to the size of terminal
Below is the the instruction that describes the task: ### Input: Override the in-class params: @param show_row_hdrs : show row headers @param show_col_hdrs : show column headers @param show_col_hdr_in_cell : embed column header in each cell @param auto_resize : auto resize according to the size of terminal ### Response: def config(self, show_row_hdrs=True, show_col_hdrs=True, show_col_hdr_in_cell=False, auto_resize=True): """ Override the in-class params: @param show_row_hdrs : show row headers @param show_col_hdrs : show column headers @param show_col_hdr_in_cell : embed column header in each cell @param auto_resize : auto resize according to the size of terminal """ self.show_row_hdrs = show_row_hdrs self.show_col_hdrs = show_col_hdrs self.show_col_hdr_in_cell = show_col_hdr_in_cell
def _post(self, url, data=None, **kwargs):
    """Wrapper around request.post() to use the API prefix. Returns a JSON response.

    :param url: path appended to the configured API prefix.
    :param data: form payload; a fresh empty dict is used when omitted.
    :param kwargs: forwarded to ``Session.post``; a ``files`` entry makes
        the request carry the session auth explicitly (multipart upload).
    """
    # Fix: the original used a mutable default argument (``data={}``).
    # Use None as the sentinel so calls can never share a common dict.
    if data is None:
        data = {}
    if 'files' in kwargs:
        req = self._session.post(self._api_prefix + url,
                                 auth=self._session.auth,
                                 data=data, **kwargs)
        return self._action(req)
    req = self._session.post(self._api_prefix + url, data=data, **kwargs)
    return self._action(req)
Wrapper around request.post() to use the API prefix. Returns a JSON response.
Below is the the instruction that describes the task: ### Input: Wrapper around request.post() to use the API prefix. Returns a JSON response. ### Response: def _post(self, url, data={}, **kwargs): """Wrapper around request.post() to use the API prefix. Returns a JSON response.""" if 'files' in kwargs: req = self._session.post(self._api_prefix + url, auth=self._session.auth, data=data, **kwargs) return self._action(req) req = self._session.post(self._api_prefix + url, data=data, **kwargs) return self._action(req)
def create_objective(dist, abscissas):
    """Build the objective used to score a candidate abscissa.

    The returned callable evaluates the (negated) square root of the
    density at the candidate point, weighted by the product of its
    distances to every interior abscissa.
    """
    interior = numpy.array(abscissas[1:-1])

    def obj(absisa):
        """Score a single candidate point (lower is better)."""
        spread = numpy.prod(numpy.abs(interior - absisa))
        return -numpy.sqrt(dist.pdf(absisa)) * spread

    return obj
Create objective function.
Below is the the instruction that describes the task: ### Input: Create objective function. ### Response: def create_objective(dist, abscissas): """Create objective function.""" abscissas_ = numpy.array(abscissas[1:-1]) def obj(absisa): """Local objective function.""" out = -numpy.sqrt(dist.pdf(absisa)) out *= numpy.prod(numpy.abs(abscissas_ - absisa)) return out return obj
def RegisterAt(cls, *args, **kwargs):
    """
    **RegisterAt**

        RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None)

    Registers a plain function (rather than a method) so that it transforms
    the value being piped down the DSL, with the piped value inserted as
    argument ``n``.  Two call forms are supported.

    Direct registration::

        MyBuilder.RegisterAt(1, some_fun, "my_lib.")

    Decorator form (``f`` omitted)::

        @MyBuilder.RegisterAt(1, "my_lib.")
        def some_fun(obj, arg1, arg2):
            # code

    ``Register`` is a shortcut for ``RegisterAt(1, ...)``.

    **Also See**

    * `phi.builder.Builder.RegisterMethod`
    """
    if len(args) == 3:
        # Direct form: RegisterAt(n, f, library_path).
        # Fix: the original fell through and returned a decorator that
        # re-unpacked the three args as two and was therefore unusable;
        # it also hid unpack errors behind a bare ``except:``.
        n, f, library_path = args
        cls._RegisterAt(n, f, library_path, **kwargs)
        return None

    # Decorator form: RegisterAt(n, library_path) -> decorator(f).
    n, library_path = args

    def register_decorator(f):
        cls._RegisterAt(n, f, library_path, **kwargs)
        return f

    return register_decorator
**RegisterAt** RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None) Most of the time you don't want to register an method as such, that is, you don't care about the `self` builder object, instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g. def some_fun(obj, arg1, arg2): # code @MyBuilder.RegisterMethod("my_lib.") def some_fun_wrapper(self, arg1, arg2): return self.ThenAt(1, some_fun, arg1, arg2) can be written directly as @MyBuilder.RegisterAt(1, "my_lib.") def some_fun(obj, arg1, arg2): # code For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)` @MyBuilder.Register("my_lib.") def some_fun(obj, arg1, arg2): # code **Also See** * `phi.builder.Builder.RegisterMethod`
Below is the the instruction that describes the task: ### Input: **RegisterAt** RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None) Most of the time you don't want to register an method as such, that is, you don't care about the `self` builder object, instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g. def some_fun(obj, arg1, arg2): # code @MyBuilder.RegisterMethod("my_lib.") def some_fun_wrapper(self, arg1, arg2): return self.ThenAt(1, some_fun, arg1, arg2) can be written directly as @MyBuilder.RegisterAt(1, "my_lib.") def some_fun(obj, arg1, arg2): # code For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)` @MyBuilder.Register("my_lib.") def some_fun(obj, arg1, arg2): # code **Also See** * `phi.builder.Builder.RegisterMethod` ### Response: def RegisterAt(cls, *args, **kwargs): """ **RegisterAt** RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None) Most of the time you don't want to register an method as such, that is, you don't care about the `self` builder object, instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g. 
def some_fun(obj, arg1, arg2): # code @MyBuilder.RegisterMethod("my_lib.") def some_fun_wrapper(self, arg1, arg2): return self.ThenAt(1, some_fun, arg1, arg2) can be written directly as @MyBuilder.RegisterAt(1, "my_lib.") def some_fun(obj, arg1, arg2): # code For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)` @MyBuilder.Register("my_lib.") def some_fun(obj, arg1, arg2): # code **Also See** * `phi.builder.Builder.RegisterMethod` """ unpack_error = True try: n, f, library_path = args unpack_error = False cls._RegisterAt(n, f, library_path, **kwargs) except: if not unpack_error: raise def register_decorator(f): n, library_path = args cls._RegisterAt(n, f, library_path, **kwargs) return f return register_decorator
def action_stats(self, hostname=None):
    """Show stats (possibly limited by hostname)."""
    # Renamed from ``format`` to avoid shadowing the builtin.
    row_fmt = "%-35s %-11s %-11s %-11s %-11s"
    # Fix: the original used Python 2 print statements, which are a
    # syntax error under Python 3; converted to the print() function.
    print(row_fmt % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT"))
    for host, details in sorted(self.client.stats(hostname).items()):
        print(row_fmt % (
            host,
            details.get("open_requests", 0),
            details.get("completed_requests", 0),
            details.get("bytes_received", 0),
            details.get("bytes_sent", 0),
        ))
Shows stats (possibly limited by hostname)
Below is the the instruction that describes the task: ### Input: Shows stats (possibly limited by hostname) ### Response: def action_stats(self, hostname=None): "Shows stats (possibly limited by hostname)" format = "%-35s %-11s %-11s %-11s %-11s" print format % ("HOST", "OPEN", "COMPLETED", "BYTES IN", "BYTES OUT") for host, details in sorted(self.client.stats(hostname).items()): print format % ( host, details.get("open_requests", 0), details.get("completed_requests", 0), details.get("bytes_received", 0), details.get("bytes_sent", 0), )
def main_loop(args):
    '''main loop logic for trial keeper

    Spawns the trial command as a shell subprocess, pipes its output to the
    NNI manager's remote logger, and polls until the trial exits.  When an
    HDFS output directory is configured, the local NNI output directory is
    copied to HDFS before exiting with the trial's return code.
    '''
    if not os.path.exists(LOG_DIR):
        os.makedirs(LOG_DIR)
    # NOTE(review): these two handles are opened but never written to or
    # closed here — presumably only to ensure the files exist ('a+'
    # creates them); confirm before removing.
    stdout_file = open(STDOUT_FULL_PATH, 'a+')
    stderr_file = open(STDERR_FULL_PATH, 'a+')
    trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper', StdOutputType.Stdout, args.log_collection)
    # redirect trial keeper's stdout and stderr to syslog
    trial_syslogger_stdout = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial', StdOutputType.Stdout, args.log_collection)
    sys.stdout = sys.stderr = trial_keeper_syslogger
    # backward compatibility
    hdfs_host = None
    hdfs_output_dir = None
    if args.hdfs_host:
        hdfs_host = args.hdfs_host
    elif args.pai_hdfs_host:
        hdfs_host = args.pai_hdfs_host
    if args.hdfs_output_dir:
        hdfs_output_dir = args.hdfs_output_dir
    elif args.pai_hdfs_output_dir:
        hdfs_output_dir = args.pai_hdfs_output_dir
    # Pull the experiment directory down from HDFS before launching the trial.
    if hdfs_host is not None and args.nni_hdfs_exp_dir is not None:
        try:
            if args.webhdfs_path:
                hdfs_client = HdfsClient(hosts='{0}:80'.format(hdfs_host), user_name=args.pai_user_name, webhdfs_path=args.webhdfs_path, timeout=5)
            else:
                # backward compatibility
                hdfs_client = HdfsClient(hosts='{0}:{1}'.format(hdfs_host, '50070'), user_name=args.pai_user_name, timeout=5)
        except Exception as e:
            nni_log(LogType.Error, 'Create HDFS client error: ' + str(e))
            raise e
        copyHdfsDirectoryToLocal(args.nni_hdfs_exp_dir, os.getcwd(), hdfs_client)
    # Notice: We don't appoint env, which means subprocess wil inherit current environment and that is expected behavior
    log_pipe_stdout = trial_syslogger_stdout.get_pipelog_reader()
    process = Popen(args.trial_command, shell = True, stdout = log_pipe_stdout, stderr = log_pipe_stdout)
    nni_log(LogType.Info, 'Trial keeper spawns a subprocess (pid {0}) to run command: {1}'.format(process.pid, shlex.split(args.trial_command)))
    # Poll the trial every 2 seconds until it has exited AND its piped
    # output has been fully drained by the log reader.
    while True:
        retCode = process.poll()
        # child worker process exits and all stdout data is read
        if retCode is not None and log_pipe_stdout.set_process_exit() and log_pipe_stdout.is_read_completed == True:
            nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode))
            if hdfs_output_dir is not None:
                # Copy local directory to hdfs for OpenPAI
                # NOTE(review): ``hdfs_client`` is only bound in the
                # download branch above; if only hdfs_output_dir is
                # configured this would raise NameError — confirm callers
                # always set an HDFS host alongside the output dir.
                nni_local_output_dir = os.environ['NNI_OUTPUT_DIR']
                try:
                    if copyDirectoryToHdfs(nni_local_output_dir, hdfs_output_dir, hdfs_client):
                        nni_log(LogType.Info, 'copy directory from {0} to {1} success!'.format(nni_local_output_dir, hdfs_output_dir))
                    else:
                        nni_log(LogType.Info, 'copy directory from {0} to {1} failed!'.format(nni_local_output_dir, hdfs_output_dir))
                except Exception as e:
                    nni_log(LogType.Error, 'HDFS copy directory got exception: ' + str(e))
                    raise e
            ## Exit as the retCode of subprocess(trial)
            exit(retCode)
            break
        time.sleep(2)
main loop logic for trial keeper
Below is the the instruction that describes the task: ### Input: main loop logic for trial keeper ### Response: def main_loop(args): '''main loop logic for trial keeper''' if not os.path.exists(LOG_DIR): os.makedirs(LOG_DIR) stdout_file = open(STDOUT_FULL_PATH, 'a+') stderr_file = open(STDERR_FULL_PATH, 'a+') trial_keeper_syslogger = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial_keeper', StdOutputType.Stdout, args.log_collection) # redirect trial keeper's stdout and stderr to syslog trial_syslogger_stdout = RemoteLogger(args.nnimanager_ip, args.nnimanager_port, 'trial', StdOutputType.Stdout, args.log_collection) sys.stdout = sys.stderr = trial_keeper_syslogger # backward compatibility hdfs_host = None hdfs_output_dir = None if args.hdfs_host: hdfs_host = args.hdfs_host elif args.pai_hdfs_host: hdfs_host = args.pai_hdfs_host if args.hdfs_output_dir: hdfs_output_dir = args.hdfs_output_dir elif args.pai_hdfs_output_dir: hdfs_output_dir = args.pai_hdfs_output_dir if hdfs_host is not None and args.nni_hdfs_exp_dir is not None: try: if args.webhdfs_path: hdfs_client = HdfsClient(hosts='{0}:80'.format(hdfs_host), user_name=args.pai_user_name, webhdfs_path=args.webhdfs_path, timeout=5) else: # backward compatibility hdfs_client = HdfsClient(hosts='{0}:{1}'.format(hdfs_host, '50070'), user_name=args.pai_user_name, timeout=5) except Exception as e: nni_log(LogType.Error, 'Create HDFS client error: ' + str(e)) raise e copyHdfsDirectoryToLocal(args.nni_hdfs_exp_dir, os.getcwd(), hdfs_client) # Notice: We don't appoint env, which means subprocess wil inherit current environment and that is expected behavior log_pipe_stdout = trial_syslogger_stdout.get_pipelog_reader() process = Popen(args.trial_command, shell = True, stdout = log_pipe_stdout, stderr = log_pipe_stdout) nni_log(LogType.Info, 'Trial keeper spawns a subprocess (pid {0}) to run command: {1}'.format(process.pid, shlex.split(args.trial_command))) while True: retCode = process.poll() # child worker 
process exits and all stdout data is read if retCode is not None and log_pipe_stdout.set_process_exit() and log_pipe_stdout.is_read_completed == True: nni_log(LogType.Info, 'subprocess terminated. Exit code is {}. Quit'.format(retCode)) if hdfs_output_dir is not None: # Copy local directory to hdfs for OpenPAI nni_local_output_dir = os.environ['NNI_OUTPUT_DIR'] try: if copyDirectoryToHdfs(nni_local_output_dir, hdfs_output_dir, hdfs_client): nni_log(LogType.Info, 'copy directory from {0} to {1} success!'.format(nni_local_output_dir, hdfs_output_dir)) else: nni_log(LogType.Info, 'copy directory from {0} to {1} failed!'.format(nni_local_output_dir, hdfs_output_dir)) except Exception as e: nni_log(LogType.Error, 'HDFS copy directory got exception: ' + str(e)) raise e ## Exit as the retCode of subprocess(trial) exit(retCode) break time.sleep(2)
def parse_geometry(ml_log, log=None, ml_version='2016.12', print_output=False):
    """Parse the ml_log file generated by the measure_geometry function.

    Warnings:
        Not all keys may exist if mesh is not watertight or manifold

    Args:
        ml_log (str): MeshLab log file to parse
        log (str): filename to log output
        ml_version (str): MeshLab version string; '1.3.4BETA' uses older
            column offsets for the surface-area line
        print_output (bool): if True (and no log file given), print each
            parsed key/value pair

    Returns:
        dict: parsed geometry values keyed by measurement name, with an
        'aabb' sub-dict for the bounding box
    """
    # TODO: read more than one occurrence per file. Record in list.
    aabb = {}
    geometry = {'aabb':aabb}
    with open(ml_log) as fread:
        # Each branch recognizes one MeshLab report line by substring and
        # pulls fixed whitespace-split columns out of it.
        for line in fread:
            if 'Mesh Bounding Box min' in line: #2016.12
                geometry['aabb']['min'] = (line.split()[4:7])
                geometry['aabb']['min'] = [util.to_float(val) for val in geometry['aabb']['min']]
            if 'Mesh Bounding Box max' in line: #2016.12
                geometry['aabb']['max'] = (line.split()[4:7])
                geometry['aabb']['max'] = [util.to_float(val) for val in geometry['aabb']['max']]
            if 'Mesh Bounding Box Size' in line: #2016.12
                geometry['aabb']['size'] = (line.split()[4:7])
                geometry['aabb']['size'] = [util.to_float(val) for val in geometry['aabb']['size']]
            if 'Mesh Bounding Box Diag' in line: #2016.12
                geometry['aabb']['diagonal'] = util.to_float(line.split()[4])
            if 'Mesh Volume' in line:
                geometry['volume_mm3'] = util.to_float(line.split()[3])
                geometry['volume_cm3'] = geometry['volume_mm3'] * 0.001
            if 'Mesh Surface' in line:
                # Column offset differs between MeshLab versions.
                if ml_version == '1.3.4BETA':
                    geometry['area_mm2'] = util.to_float(line.split()[3])
                else:
                    geometry['area_mm2'] = util.to_float(line.split()[4])
                geometry['area_cm2'] = geometry['area_mm2'] * 0.01
            if 'Mesh Total Len of' in line:
                if 'including faux edges' in line:
                    geometry['total_edge_length_incl_faux'] = util.to_float(
                        line.split()[7])
                else:
                    geometry['total_edge_length'] = util.to_float(
                        line.split()[7])
            if 'Thin shell barycenter' in line:
                geometry['barycenter'] = (line.split()[3:6])
                geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]
            if 'Thin shell (faces) barycenter' in line: #2016.12
                geometry['barycenter'] = (line.split()[4:7])
                geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']]
            if 'Vertices barycenter' in line: #2016.12
                geometry['vert_barycenter'] = (line.split()[2:5])
                geometry['vert_barycenter'] = [util.to_float(val) for val in geometry['vert_barycenter']]
            if 'Center of Mass' in line:
                geometry['center_of_mass'] = (line.split()[4:7])
                geometry['center_of_mass'] = [util.to_float(val) for val in geometry['center_of_mass']]
            if 'Inertia Tensor' in line:
                # Matrix rows follow on the next three file lines.
                # NOTE(review): ``next(fread, val)`` passes the loop index
                # as the StopIteration *default*; on a truncated file this
                # returns an int and ``.split()`` would raise — presumably
                # ``next(fread)`` was intended; confirm.
                geometry['inertia_tensor'] = []
                for val in range(3):
                    row = (next(fread, val).split()[1:4])
                    row = [util.to_float(b) for b in row]
                    geometry['inertia_tensor'].append(row)
            if 'Principal axes' in line:
                geometry['principal_axes'] = []
                for val in range(3):
                    row = (next(fread, val).split()[1:4])
                    row = [util.to_float(b) for b in row]
                    geometry['principal_axes'].append(row)
            if 'axis momenta' in line:
                geometry['axis_momenta'] = (next(fread).split()[1:4])
                geometry['axis_momenta'] = [util.to_float(val) for val in geometry['axis_momenta']]
                break # stop after we find the first match
    # Report results: append to the log file if given, else optionally print.
    # NOTE(review): the log file is reopened and closed once per key.
    for key, value in geometry.items():
        if log is not None:
            log_file = open(log, 'a')
            log_file.write('{:27} = {}\n'.format(key, value))
            log_file.close()
        elif print_output:
            print('{:27} = {}'.format(key, value))
    return geometry
Parse the ml_log file generated by the measure_geometry function. Warnings: Not all keys may exist if mesh is not watertight or manifold Args: ml_log (str): MeshLab log file to parse log (str): filename to log output
Below is the the instruction that describes the task: ### Input: Parse the ml_log file generated by the measure_geometry function. Warnings: Not all keys may exist if mesh is not watertight or manifold Args: ml_log (str): MeshLab log file to parse log (str): filename to log output ### Response: def parse_geometry(ml_log, log=None, ml_version='2016.12', print_output=False): """Parse the ml_log file generated by the measure_geometry function. Warnings: Not all keys may exist if mesh is not watertight or manifold Args: ml_log (str): MeshLab log file to parse log (str): filename to log output """ # TODO: read more than one occurrence per file. Record in list. aabb = {} geometry = {'aabb':aabb} with open(ml_log) as fread: for line in fread: if 'Mesh Bounding Box min' in line: #2016.12 geometry['aabb']['min'] = (line.split()[4:7]) geometry['aabb']['min'] = [util.to_float(val) for val in geometry['aabb']['min']] if 'Mesh Bounding Box max' in line: #2016.12 geometry['aabb']['max'] = (line.split()[4:7]) geometry['aabb']['max'] = [util.to_float(val) for val in geometry['aabb']['max']] if 'Mesh Bounding Box Size' in line: #2016.12 geometry['aabb']['size'] = (line.split()[4:7]) geometry['aabb']['size'] = [util.to_float(val) for val in geometry['aabb']['size']] if 'Mesh Bounding Box Diag' in line: #2016.12 geometry['aabb']['diagonal'] = util.to_float(line.split()[4]) if 'Mesh Volume' in line: geometry['volume_mm3'] = util.to_float(line.split()[3]) geometry['volume_cm3'] = geometry['volume_mm3'] * 0.001 if 'Mesh Surface' in line: if ml_version == '1.3.4BETA': geometry['area_mm2'] = util.to_float(line.split()[3]) else: geometry['area_mm2'] = util.to_float(line.split()[4]) geometry['area_cm2'] = geometry['area_mm2'] * 0.01 if 'Mesh Total Len of' in line: if 'including faux edges' in line: geometry['total_edge_length_incl_faux'] = util.to_float( line.split()[7]) else: geometry['total_edge_length'] = util.to_float( line.split()[7]) if 'Thin shell barycenter' in line: 
geometry['barycenter'] = (line.split()[3:6]) geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']] if 'Thin shell (faces) barycenter' in line: #2016.12 geometry['barycenter'] = (line.split()[4:7]) geometry['barycenter'] = [util.to_float(val) for val in geometry['barycenter']] if 'Vertices barycenter' in line: #2016.12 geometry['vert_barycenter'] = (line.split()[2:5]) geometry['vert_barycenter'] = [util.to_float(val) for val in geometry['vert_barycenter']] if 'Center of Mass' in line: geometry['center_of_mass'] = (line.split()[4:7]) geometry['center_of_mass'] = [util.to_float(val) for val in geometry['center_of_mass']] if 'Inertia Tensor' in line: geometry['inertia_tensor'] = [] for val in range(3): row = (next(fread, val).split()[1:4]) row = [util.to_float(b) for b in row] geometry['inertia_tensor'].append(row) if 'Principal axes' in line: geometry['principal_axes'] = [] for val in range(3): row = (next(fread, val).split()[1:4]) row = [util.to_float(b) for b in row] geometry['principal_axes'].append(row) if 'axis momenta' in line: geometry['axis_momenta'] = (next(fread).split()[1:4]) geometry['axis_momenta'] = [util.to_float(val) for val in geometry['axis_momenta']] break # stop after we find the first match for key, value in geometry.items(): if log is not None: log_file = open(log, 'a') log_file.write('{:27} = {}\n'.format(key, value)) log_file.close() elif print_output: print('{:27} = {}'.format(key, value)) return geometry
def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable) containing the
    configuration of a layer; the same layer can be reinstantiated later
    (without its trained weights) from it.

    Returns:
      config: A Python dictionary of class keyword arguments and their
        serialized values.
    """
    if self.activation:
        serialized_activation = tf.keras.activations.serialize(self.activation)
    else:
        serialized_activation = None
    config = {
        'units': self.units,
        'activation': serialized_activation,
        'activity_regularizer':
            tf.keras.initializers.serialize(self.activity_regularizer),
    }
    function_keys = (
        'kernel_posterior_fn',
        'kernel_posterior_tensor_fn',
        'kernel_prior_fn',
        'kernel_divergence_fn',
        'bias_posterior_fn',
        'bias_posterior_tensor_fn',
        'bias_prior_fn',
        'bias_divergence_fn',
    )
    # Serialize each callable attribute into a (name, type) pair of entries.
    for key in function_keys:
        fn = getattr(self, key)
        if fn is None:
            name, fn_type = None, None
        else:
            name, fn_type = tfp_layers_util.serialize_function(fn)
        config[key] = name
        config[key + '_type'] = fn_type
    base_config = super(_DenseVariational, self).get_config()
    # Base config first so this layer's entries win on key collisions.
    merged = dict(base_config)
    merged.update(config)
    return merged
Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. Returns: config: A Python dictionary of class keyword arguments and their serialized values.
Below is the the instruction that describes the task: ### Input: Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. Returns: config: A Python dictionary of class keyword arguments and their serialized values. ### Response: def get_config(self): """Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. Returns: config: A Python dictionary of class keyword arguments and their serialized values. """ config = { 'units': self.units, 'activation': (tf.keras.activations.serialize(self.activation) if self.activation else None), 'activity_regularizer': tf.keras.initializers.serialize(self.activity_regularizer), } function_keys = [ 'kernel_posterior_fn', 'kernel_posterior_tensor_fn', 'kernel_prior_fn', 'kernel_divergence_fn', 'bias_posterior_fn', 'bias_posterior_tensor_fn', 'bias_prior_fn', 'bias_divergence_fn', ] for function_key in function_keys: function = getattr(self, function_key) if function is None: function_name = None function_type = None else: function_name, function_type = tfp_layers_util.serialize_function( function) config[function_key] = function_name config[function_key + '_type'] = function_type base_config = super(_DenseVariational, self).get_config() return dict(list(base_config.items()) + list(config.items()))
def get_user_groups(self, user):
    """
    Get user's group memberships.

    Args:
        user (string): User name.

    Returns:
        (list): User's groups.

    Raises:
        requests.HTTPError on failure.
    """
    # Authenticate against the project service before querying it.
    service = self.project_service
    service.set_auth(self._token_project)
    return service.get_user_groups(user)
Get user's group memberships. Args: user (string): User name. Returns: (list): User's groups. Raises: requests.HTTPError on failure.
Below is the instruction that describes the task: ### Input: Get user's group memberships. Args: user (string): User name. Returns: (list): User's groups. Raises: requests.HTTPError on failure. ### Response: def get_user_groups(self, user): """ Get user's group memberships. Args: user (string): User name. Returns: (list): User's groups. Raises: requests.HTTPError on failure. """ self.project_service.set_auth(self._token_project) return self.project_service.get_user_groups(user)
def _find_evil(self):
    """Collect missing couplers between working qubits in the same cell.

    A utility that records every pair of working qubits in one cell whose
    connecting coupler is absent from the processor graph.  The presence
    of (a nonconstant number of) these breaks the polynomial-time claim
    for our algorithm.

    Note: we're only actually hurt by missing intercell couplers.
    """
    rows, cols, tile = self.M, self.N, self.L
    proc = self._proc0
    missing = []
    for x in range(rows):
        for y in range(cols):
            for u in range(tile):
                for v in range(tile):
                    p = (x, y, 0, u)
                    q = (x, y, 1, v)
                    if p in proc and q in proc and p not in proc[q]:
                        missing.append((p, q))
    self._evil = missing
A utility function that computes a list of missing couplers which should connect two working qubits in the same cell. The presence of (a nonconstant number of) these breaks the polynomial-time claim for our algorithm. Note: we're only actually hurt by missing intercell couplers.
Below is the the instruction that describes the task: ### Input: A utility function that computes a list of missing couplers which should connect two working qubits in the same cell. The presence of (a nonconstant number of) these breaks the polynomial-time claim for our algorithm. Note: we're only actually hurt by missing intercell couplers. ### Response: def _find_evil(self): """A utility function that computes a list of missing couplers which should connect two working qubits in the same cell. The presence of (a nonconstant number of) these breaks the polynomial-time claim for our algorithm. Note: we're only actually hurt by missing intercell couplers. """ M, N, L = self.M, self.N, self.L proc = self._proc0 evil = [] cells = [(x, y) for x in range(M) for y in range(N)] spots = [(u, v) for u in range(L) for v in range(L)] for x, y in cells: for u, v in spots: p = (x, y, 0, u) q = (x, y, 1, v) if p in proc and q in proc and p not in proc[q]: evil.append((p, q)) self._evil = evil
def get_primary_key_columns(self):
    """
    Returns the primary key columns.

    :rtype: list
    :raises DBALException: if the table has no primary key
    """
    if self.has_primary_key():
        return self.get_primary_key().get_columns()

    raise DBALException('Table "%s" has no primary key.' % self.get_name())
Returns the primary key columns. :rtype: list
Below is the the instruction that describes the task: ### Input: Returns the primary key columns. :rtype: list ### Response: def get_primary_key_columns(self): """ Returns the primary key columns. :rtype: list """ if not self.has_primary_key(): raise DBALException('Table "%s" has no primary key.' % self.get_name()) return self.get_primary_key().get_columns()
def create_configuration(self, node, ports):
    """Create RAID configuration on the bare metal.

    Reads the desired RAID layout from ``node['target_raid_config']``
    and delegates the actual creation to ``hpssa_manager``.

    :param node: A dictionary of the node object
    :param ports: A list of dictionaries containing information of ports
        for the node (unused here; kept for the interface)
    :returns: The current RAID configuration, e.g.::

        {
            'logical_disks': [{
                'size_gb': 100,
                'raid_level': 1,
                'physical_disks': ['5I:0:1', '5I:0:2'],
                'controller': 'Smart array controller'
            }]
        }
    """
    # Shallow-copy so the manager cannot mutate the node's stored config.
    desired_config = node.get('target_raid_config', {}).copy()
    return hpssa_manager.create_configuration(raid_config=desired_config)
Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
Below is the the instruction that describes the task: ### Input: Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } ### Response: def create_configuration(self, node, ports): """Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] } """ target_raid_config = node.get('target_raid_config', {}).copy() return hpssa_manager.create_configuration( raid_config=target_raid_config)
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    """
    Creates the release notes file, if not in a package.

    Args:
        project_dir(str): Path to the git repo of the project.
        bugtracker_url(str): Url to the bug tracker for the issues.

    Returns:
        None

    Raises:
        RuntimeError: If the release notes could not be retrieved
    """
    # A PKG-INFO file means we are inside an sdist/package: nothing to do.
    if os.path.exists(os.path.join(project_dir, 'PKG-INFO')):
        return

    notes = get_releasenotes(
        project_dir=project_dir,
        bugtracker_url=bugtracker_url,
    )
    with open('RELEASE_NOTES', 'wb') as notes_fd:
        notes_fd.write(notes.encode('utf-8') + b'\n')
Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved
Below is the the instruction that describes the task: ### Input: Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved ### Response: def create_releasenotes(project_dir=os.curdir, bugtracker_url=''): """ Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved """ pkg_info_file = os.path.join(project_dir, 'PKG-INFO') if os.path.exists(pkg_info_file): return with open('RELEASE_NOTES', 'wb') as releasenotes_fd: releasenotes_fd.write( get_releasenotes( project_dir=project_dir, bugtracker_url=bugtracker_url, ).encode('utf-8') + b'\n' )
def ceil(self, value, *args):
    """Round a number up to the nearest integer, preserving its unit.

    args:
        value (str): target
    returns:
        str
    """
    number, unit = utility.analyze_number(value)
    rounded = int(math.ceil(number))
    return utility.with_unit(rounded, unit)
Ceil number args: value (str): target returns: str
Below is the the instruction that describes the task: ### Input: Ceil number args: value (str): target returns: str ### Response: def ceil(self, value, *args): """ Ceil number args: value (str): target returns: str """ n, u = utility.analyze_number(value) return utility.with_unit(int(math.ceil(n)), u)
def do_cp(self, params):
    """
    \x1b[1mNAME\x1b[0m
        cp - Copy from/to local/remote or remote/remote paths

    \x1b[1mSYNOPSIS\x1b[0m
        cp <src> <dst> [recursive] [overwrite] [asynchronous] [verbose] [max_items]

    \x1b[1mDESCRIPTION\x1b[0m
        src and dst can be:

        /some/path (in the connected server)
        zk://[scheme:user:passwd@]host/<path>
        json://!some!path!backup.json/some/path
        file:///some/file

        with a few restrictions. Given the semantic differences that znodes have with
        filesystem directories recursive copying from znodes to an fs could lose data,
        but to a JSON file it would work just fine.

    \x1b[1mOPTIONS\x1b[0m
        * recursive: recursively copy src (default: false)
        * overwrite: overwrite the dst path (default: false)
        * asynchronous: do asynchronous copies (default: false)
        * verbose: verbose output of every path (default: false)
        * max_items: max number of paths to copy (0 is infinite) (default: 0)

    \x1b[1mEXAMPLES\x1b[0m
        > cp /some/znode /backup/copy-znode  # local
        > cp /some/znode zk://digest:bernie:pasta@10.0.0.1/backup true true
        > cp /some/znode json://!home!user!backup.json/ true true
        > cp file:///tmp/file /some/zone  # fs to zk
    """
    # Delegate to the shared copy implementation. The trailing False
    # presumably disables asynchronous copying for this command -- confirm
    # against the signature of self.copy().
    try:
        self.copy(params, params.recursive, params.overwrite, params.max_items, False)
    except AuthFailedError:
        # Surface ZooKeeper auth failures to the user instead of a traceback.
        self.show_output("Authentication failed.")
def is_ambiguous(self, dt):
    """
    Whether or not the "wall time" of a given datetime is ambiguous in this
    zone.

    A wall time is ambiguous when the two fold values map to the same naive
    datetime but different UTC offsets (i.e. the clock reads the same time
    twice, once on each side of a transition).

    :param dt:
        A :py:class:`datetime.datetime`, naive or time zone aware.

    :return:
        Returns ``True`` if ambiguous, ``False`` otherwise.

    .. versionadded:: 2.6.0
    """
    aware = dt.replace(tzinfo=self)

    fold0 = enfold(aware, fold=0)
    fold1 = enfold(aware, fold=1)

    naive_equal = fold0.replace(tzinfo=None) == fold1.replace(tzinfo=None)
    offsets_equal = fold0.utcoffset() == fold1.utcoffset()

    return naive_equal and not offsets_equal
Whether or not the "wall time" of a given datetime is ambiguous in this zone. :param dt: A :py:class:`datetime.datetime`, naive or time zone aware. :return: Returns ``True`` if ambiguous, ``False`` otherwise. .. versionadded:: 2.6.0
Below is the the instruction that describes the task: ### Input: Whether or not the "wall time" of a given datetime is ambiguous in this zone. :param dt: A :py:class:`datetime.datetime`, naive or time zone aware. :return: Returns ``True`` if ambiguous, ``False`` otherwise. .. versionadded:: 2.6.0 ### Response: def is_ambiguous(self, dt): """ Whether or not the "wall time" of a given datetime is ambiguous in this zone. :param dt: A :py:class:`datetime.datetime`, naive or time zone aware. :return: Returns ``True`` if ambiguous, ``False`` otherwise. .. versionadded:: 2.6.0 """ dt = dt.replace(tzinfo=self) wall_0 = enfold(dt, fold=0) wall_1 = enfold(dt, fold=1) same_offset = wall_0.utcoffset() == wall_1.utcoffset() same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) return same_dt and not same_offset
def create(self, path, lock):
    """Create a direct lock for a resource path.

    path:
        Normalized path (utf8 encoded string, no trailing '/')
    lock:
        lock dictionary, without a token entry
    Returns:
        New unique lock token.: <lock

    **Note:** the lock dictionary may be modified on return:

    - lock['root'] is ignored and set to the normalized <path>
    - lock['timeout'] may be normalized and shorter than requested
    - lock['token'] is added
    """
    self._lock.acquire_write()
    try:
        # We expect only a lock definition, not an existing lock
        assert lock.get("token") is None
        assert lock.get("expire") is None, "Use timeout instead of expire"
        assert path and "/" in path

        # Normalize root: /foo/bar
        org_path = path
        path = normalize_lock_root(path)
        lock["root"] = path

        # Normalize timeout from ttl to expire-date.
        # Bug fix: the original called float() on the raw value before the
        # None check, so a missing timeout raised TypeError instead of
        # falling back to LOCK_TIME_OUT_DEFAULT.
        timeout = lock.get("timeout")
        if timeout is None:
            timeout = LockStorageDict.LOCK_TIME_OUT_DEFAULT
        else:
            timeout = float(timeout)
            # Out-of-range requests (negative or too large) are clamped to
            # the maximum, preserving the original clamping behavior.
            if timeout < 0 or timeout > LockStorageDict.LOCK_TIME_OUT_MAX:
                timeout = LockStorageDict.LOCK_TIME_OUT_MAX

        lock["timeout"] = timeout
        lock["expire"] = time.time() + timeout

        validate_lock(lock)

        token = generate_lock_token()
        lock["token"] = token

        # Store lock
        self._dict[token] = lock

        # Store locked path reference
        key = "URL2TOKEN:{}".format(path)
        if key not in self._dict:
            self._dict[key] = [token]
        else:
            # Note: Shelve dictionary returns copies, so we must reassign
            # values:
            tokList = self._dict[key]
            tokList.append(token)
            self._dict[key] = tokList
        self._flush()
        _logger.debug(
            "LockStorageDict.set({!r}): {}".format(org_path, lock_string(lock))
        )
        return lock
    finally:
        self._lock.release()
def get_specs_depth_first(self):
    """
    Get the specs for all processes (including called ones), in depth
    first order.

    The result always starts with this spec; every subworkflow spec
    reachable through a task's ``spec`` attribute is appended in the order
    it is first encountered.
    """
    visited = set()
    collected = [self]

    def walk(task_spec):
        # Each task spec is expanded at most once, even in cyclic graphs.
        if task_spec in visited:
            return
        visited.add(task_spec)
        if hasattr(task_spec, 'spec'):
            collected.append(task_spec.spec)
            walk(task_spec.spec.start)
        for successor in task_spec.outputs:
            walk(successor)

    walk(self.start)
    return collected
Get the specs for all processes (including called ones), in depth first order.
Below is the the instruction that describes the task: ### Input: Get the specs for all processes (including called ones), in depth first order. ### Response: def get_specs_depth_first(self): """ Get the specs for all processes (including called ones), in depth first order. """ done = set() specs = [self] def recursive_find(task_spec): if task_spec in done: return done.add(task_spec) if hasattr(task_spec, 'spec'): specs.append(task_spec.spec) recursive_find(task_spec.spec.start) for t in task_spec.outputs: recursive_find(t) recursive_find(self.start) return specs
def uid(uid):
    """Decorator specifying the unique identifier (UID) of a test case.

    The UID will be recorded in the test's record when executed by Mobly.

    If you use any other decorator for the test method, you may want to use
    this as the outer-most one.

    Note a common UID system is the Universal Unitque Identifier (UUID),
    but we are not limiting people to use UUID, hence the more generic
    name `UID`.

    Args:
        uid: string, the uid for the decorated test function.
    """
    if uid is None:
        raise ValueError('UID cannot be None.')

    def decorate(test_func):
        @functools.wraps(test_func)
        def wrapper(*args, **kwargs):
            return test_func(*args, **kwargs)

        # Tag the wrapper so the test runner can pick the uid up later.
        wrapper.uid = uid
        return wrapper

    return decorate
Decorator specifying the unique identifier (UID) of a test case. The UID will be recorded in the test's record when executed by Mobly. If you use any other decorator for the test method, you may want to use this as the outer-most one. Note a common UID system is the Universal Unitque Identifier (UUID), but we are not limiting people to use UUID, hence the more generic name `UID`. Args: uid: string, the uid for the decorated test function.
Below is the the instruction that describes the task: ### Input: Decorator specifying the unique identifier (UID) of a test case. The UID will be recorded in the test's record when executed by Mobly. If you use any other decorator for the test method, you may want to use this as the outer-most one. Note a common UID system is the Universal Unitque Identifier (UUID), but we are not limiting people to use UUID, hence the more generic name `UID`. Args: uid: string, the uid for the decorated test function. ### Response: def uid(uid): """Decorator specifying the unique identifier (UID) of a test case. The UID will be recorded in the test's record when executed by Mobly. If you use any other decorator for the test method, you may want to use this as the outer-most one. Note a common UID system is the Universal Unitque Identifier (UUID), but we are not limiting people to use UUID, hence the more generic name `UID`. Args: uid: string, the uid for the decorated test function. """ if uid is None: raise ValueError('UID cannot be None.') def decorate(test_func): @functools.wraps(test_func) def wrapper(*args, **kwargs): return test_func(*args, **kwargs) setattr(wrapper, 'uid', uid) return wrapper return decorate
def get_related_indicators_page(self, indicators=None, enclave_ids=None, page_size=None, page_number=None):
    """
    Finds all reports that contain any of the given indicators and returns
    correlated indicators from those reports.

    :param indicators: list of indicator values to search for
    :param enclave_ids: list of IDs of enclaves to search in
    :param page_size: number of results per page
    :param page_number: page to start returning results on
    :return: A |Page| of |Report| objects.
    """
    query = {
        'indicators': indicators,
        'enclaveIds': enclave_ids,
        'pageNumber': page_number,
        'pageSize': page_size,
    }
    response = self._client.get("indicators/related", params=query)
    return Page.from_dict(response.json(), content_type=Indicator)
Finds all reports that contain any of the given indicators and returns correlated indicators from those reports. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param page_size: number of results per page :param page_number: page to start returning results on :return: A |Page| of |Report| objects.
Below is the the instruction that describes the task: ### Input: Finds all reports that contain any of the given indicators and returns correlated indicators from those reports. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param page_size: number of results per page :param page_number: page to start returning results on :return: A |Page| of |Report| objects. ### Response: def get_related_indicators_page(self, indicators=None, enclave_ids=None, page_size=None, page_number=None): """ Finds all reports that contain any of the given indicators and returns correlated indicators from those reports. :param indicators: list of indicator values to search for :param enclave_ids: list of IDs of enclaves to search in :param page_size: number of results per page :param page_number: page to start returning results on :return: A |Page| of |Report| objects. """ params = { 'indicators': indicators, 'enclaveIds': enclave_ids, 'pageNumber': page_number, 'pageSize': page_size } resp = self._client.get("indicators/related", params=params) return Page.from_dict(resp.json(), content_type=Indicator)
def selected_functions_2(self):
    """Obtain functions available for hazard and exposure selected by user.

    :returns: List of the available functions metadata, or an empty list
        when the selection is not exactly one item.
    :rtype: list, None
    """
    chosen = self.tblFunctions2.selectedItems()
    # Anything other than a single selected cell yields no functions.
    if len(chosen) != 1:
        return []
    return chosen[0].data(RoleFunctions)
Obtain functions available for hazard and exposure selected by user. :returns: List of the available functions metadata. :rtype: list, None
Below is the the instruction that describes the task: ### Input: Obtain functions available for hazard and exposure selected by user. :returns: List of the available functions metadata. :rtype: list, None ### Response: def selected_functions_2(self): """Obtain functions available for hazard and exposure selected by user. :returns: List of the available functions metadata. :rtype: list, None """ selection = self.tblFunctions2.selectedItems() if len(selection) != 1: return [] return selection[0].data(RoleFunctions)
def _l_cv_weight_factor(self): """ Return multiplier for L-CV weightings in case of enhanced single site analysis. Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b """ b = 0.0047 * sqrt(0) + 0.0023 / 2 c = 0.02609 / (self.catchment.record_length - 1) return c / (b + c)
Return multiplier for L-CV weightings in case of enhanced single site analysis. Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b
Below is the the instruction that describes the task: ### Input: Return multiplier for L-CV weightings in case of enhanced single site analysis. Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b ### Response: def _l_cv_weight_factor(self): """ Return multiplier for L-CV weightings in case of enhanced single site analysis. Methodology source: Science Report SC050050, eqn. 6.15a and 6.15b """ b = 0.0047 * sqrt(0) + 0.0023 / 2 c = 0.02609 / (self.catchment.record_length - 1) return c / (b + c)
def setValue(self, value):
    """
    Set the attributes value, wrapping non-Text values in a Text object.

    @param value: The new value (may be None)
    @type value: basestring
    @return: self
    @rtype: L{Attribute}
    """
    self.value = value if isinstance(value, Text) else Text(value)
    return self
Set the attributes value @param value: The new value (may be None) @type value: basestring @return: self @rtype: L{Attribute}
Below is the the instruction that describes the task: ### Input: Set the attributes value @param value: The new value (may be None) @type value: basestring @return: self @rtype: L{Attribute} ### Response: def setValue(self, value): """ Set the attributes value @param value: The new value (may be None) @type value: basestring @return: self @rtype: L{Attribute} """ if isinstance(value, Text): self.value = value else: self.value = Text(value) return self
def is_locked(self):
    """Report whether the device screen is currently locked.

    Returns:
        - lock state(bool)

    Raises:
        RuntimeError: if the lock state cannot be parsed from dumpsys output.
    """
    policy_dump = self.shell('dumpsys', 'window', 'policy')
    match = re.search('mShowingLockscreen=(true|false)', policy_dump)
    if match is None:
        raise RuntimeError("Couldn't determine screen lock state")
    return match.group(1) == 'true'
Returns: - lock state(bool) Raises: RuntimeError
Below is the the instruction that describes the task: ### Input: Returns: - lock state(bool) Raises: RuntimeError ### Response: def is_locked(self): """ Returns: - lock state(bool) Raises: RuntimeError """ _lockScreenRE = re.compile('mShowingLockscreen=(true|false)') m = _lockScreenRE.search(self.shell('dumpsys', 'window', 'policy')) if m: return (m.group(1) == 'true') raise RuntimeError("Couldn't determine screen lock state")
def liujordan(zenith, transmittance, airmass, dni_extra=1367.0):
    '''
    Determine DNI, DHI, GHI from extraterrestrial flux, transmittance,
    and optical air mass number.

    Liu and Jordan, 1960, developed a simplified direct radiation model.
    DHI is from an empirical equation for diffuse radiation from Liu and
    Jordan, 1960.

    Parameters
    ----------
    zenith: pd.Series
        True (not refraction-corrected) zenith angles in decimal
        degrees. If Z is a vector it must be of the same size as all
        other vector inputs. Z must be >=0 and <=180.

    transmittance: float
        Atmospheric transmittance between 0 and 1.

    airmass: numeric
        Optical air mass number.

    dni_extra: float, default 1367.0
        Direct irradiance incident at the top of the atmosphere.

    Returns
    -------
    irradiance: DataFrame
        Modeled direct normal irradiance, direct horizontal irradiance,
        and global horizontal irradiance in W/m^2

    References
    ----------
    [1] Campbell, G. S., J. M. Norman (1998) An Introduction to
    Environmental Biophysics. 2nd Ed. New York: Springer.

    [2] Liu, B. Y., R. C. Jordan, (1960). "The interrelationship and
    characteristic distribution of direct, diffuse, and total solar
    radiation".  Solar Energy 4:1-19
    '''
    # Beam attenuation through the atmosphere and projection onto the
    # horizontal plane; both are hoisted so they are computed once.
    attenuation = transmittance ** airmass
    cos_zenith = np.cos(np.radians(zenith))

    dni = dni_extra * attenuation
    dhi = 0.3 * (1.0 - attenuation) * dni_extra * cos_zenith
    ghi = dhi + dni * cos_zenith

    irrads = OrderedDict([('ghi', ghi), ('dni', dni), ('dhi', dhi)])

    # Series input means vector output: package the columns as a DataFrame.
    if isinstance(ghi, pd.Series):
        irrads = pd.DataFrame(irrads)

    return irrads
def getMetricsTimeline(tmaster, component_name, metric_names, instances, start_time, end_time, callback=None): """ Get the specified metrics for the given component name of this topology. Returns the following dict on success: { "timeline": { <metricname>: { <instance>: { <start_time> : <numeric value>, <start_time> : <numeric value>, ... } ... }, ... }, "starttime": <numeric value>, "endtime": <numeric value>, "component": "..." } Returns the following dict on failure: { "message": "..." } """ # Tmaster is the proto object and must have host and port for stats. if not tmaster or not tmaster.host or not tmaster.stats_port: raise Exception("No Tmaster found") host = tmaster.host port = tmaster.stats_port # Create the proto request object to get metrics. metricRequest = tmaster_pb2.MetricRequest() metricRequest.component_name = component_name # If no instances are give, metrics for all instances # are fetched by default. if len(instances) > 0: for instance in instances: metricRequest.instance_id.append(instance) for metricName in metric_names: metricRequest.metric.append(metricName) metricRequest.explicit_interval.start = start_time metricRequest.explicit_interval.end = end_time metricRequest.minutely = True # Serialize the metricRequest to send as a payload # with the HTTP request. metricRequestString = metricRequest.SerializeToString() # Form and send the http request. 
url = "http://{0}:{1}/stats".format(host, port) request = tornado.httpclient.HTTPRequest(url, body=metricRequestString, method='POST', request_timeout=5) Log.debug("Making HTTP call to fetch metrics") Log.debug("url: " + url) try: client = tornado.httpclient.AsyncHTTPClient() result = yield client.fetch(request) Log.debug("HTTP call complete.") except tornado.httpclient.HTTPError as e: raise Exception(str(e)) # Check the response code - error if it is in 400s or 500s responseCode = result.code if responseCode >= 400: message = "Error in getting metrics from Tmaster, code: " + responseCode Log.error(message) raise Exception(message) # Parse the response from tmaster. metricResponse = tmaster_pb2.MetricResponse() metricResponse.ParseFromString(result.body) if metricResponse.status.status == common_pb2.NOTOK: if metricResponse.status.HasField("message"): Log.warn("Received response from Tmaster: %s", metricResponse.status.message) # Form the response. ret = {} ret["starttime"] = start_time ret["endtime"] = end_time ret["component"] = component_name ret["timeline"] = {} # Loop through all the metrics # One instance corresponds to one metric, which can have # multiple IndividualMetrics for each metricname requested. for metric in metricResponse.metric: instance = metric.instance_id # Loop through all individual metrics. for im in metric.metric: metricname = im.name if metricname not in ret["timeline"]: ret["timeline"][metricname] = {} if instance not in ret["timeline"][metricname]: ret["timeline"][metricname][instance] = {} # We get minutely metrics. # Interval-values correspond to the minutely mark for which # this metric value corresponds to. for interval_value in im.interval_values: ret["timeline"][metricname][instance][interval_value.interval.start] = interval_value.value raise tornado.gen.Return(ret)
Get the specified metrics for the given component name of this topology. Returns the following dict on success: { "timeline": { <metricname>: { <instance>: { <start_time> : <numeric value>, <start_time> : <numeric value>, ... } ... }, ... }, "starttime": <numeric value>, "endtime": <numeric value>, "component": "..." } Returns the following dict on failure: { "message": "..." }
Below is the the instruction that describes the task: ### Input: Get the specified metrics for the given component name of this topology. Returns the following dict on success: { "timeline": { <metricname>: { <instance>: { <start_time> : <numeric value>, <start_time> : <numeric value>, ... } ... }, ... }, "starttime": <numeric value>, "endtime": <numeric value>, "component": "..." } Returns the following dict on failure: { "message": "..." } ### Response: def getMetricsTimeline(tmaster, component_name, metric_names, instances, start_time, end_time, callback=None): """ Get the specified metrics for the given component name of this topology. Returns the following dict on success: { "timeline": { <metricname>: { <instance>: { <start_time> : <numeric value>, <start_time> : <numeric value>, ... } ... }, ... }, "starttime": <numeric value>, "endtime": <numeric value>, "component": "..." } Returns the following dict on failure: { "message": "..." } """ # Tmaster is the proto object and must have host and port for stats. if not tmaster or not tmaster.host or not tmaster.stats_port: raise Exception("No Tmaster found") host = tmaster.host port = tmaster.stats_port # Create the proto request object to get metrics. metricRequest = tmaster_pb2.MetricRequest() metricRequest.component_name = component_name # If no instances are give, metrics for all instances # are fetched by default. if len(instances) > 0: for instance in instances: metricRequest.instance_id.append(instance) for metricName in metric_names: metricRequest.metric.append(metricName) metricRequest.explicit_interval.start = start_time metricRequest.explicit_interval.end = end_time metricRequest.minutely = True # Serialize the metricRequest to send as a payload # with the HTTP request. metricRequestString = metricRequest.SerializeToString() # Form and send the http request. 
url = "http://{0}:{1}/stats".format(host, port) request = tornado.httpclient.HTTPRequest(url, body=metricRequestString, method='POST', request_timeout=5) Log.debug("Making HTTP call to fetch metrics") Log.debug("url: " + url) try: client = tornado.httpclient.AsyncHTTPClient() result = yield client.fetch(request) Log.debug("HTTP call complete.") except tornado.httpclient.HTTPError as e: raise Exception(str(e)) # Check the response code - error if it is in 400s or 500s responseCode = result.code if responseCode >= 400: message = "Error in getting metrics from Tmaster, code: " + responseCode Log.error(message) raise Exception(message) # Parse the response from tmaster. metricResponse = tmaster_pb2.MetricResponse() metricResponse.ParseFromString(result.body) if metricResponse.status.status == common_pb2.NOTOK: if metricResponse.status.HasField("message"): Log.warn("Received response from Tmaster: %s", metricResponse.status.message) # Form the response. ret = {} ret["starttime"] = start_time ret["endtime"] = end_time ret["component"] = component_name ret["timeline"] = {} # Loop through all the metrics # One instance corresponds to one metric, which can have # multiple IndividualMetrics for each metricname requested. for metric in metricResponse.metric: instance = metric.instance_id # Loop through all individual metrics. for im in metric.metric: metricname = im.name if metricname not in ret["timeline"]: ret["timeline"][metricname] = {} if instance not in ret["timeline"][metricname]: ret["timeline"][metricname][instance] = {} # We get minutely metrics. # Interval-values correspond to the minutely mark for which # this metric value corresponds to. for interval_value in im.interval_values: ret["timeline"][metricname][instance][interval_value.interval.start] = interval_value.value raise tornado.gen.Return(ret)
def _get_simple_uploaded_file(self, image, file_name): """ :param image: a python PIL ``Image`` instance. :param file_name: The file name of the image. :returns: A django ``SimpleUploadedFile`` instance ready to be saved. """ extension = os.path.splitext(file_name)[1] mimetype, encoding = mimetypes.guess_type(file_name) content_type = mimetype or 'image/png' temp_handle = BytesIO() image.save(temp_handle, self._get_pil_format(extension)) temp_handle.seek(0) # rewind the file suf = SimpleUploadedFile( file_name, temp_handle.read(), content_type=content_type, ) return suf
:param image: a python PIL ``Image`` instance. :param file_name: The file name of the image. :returns: A django ``SimpleUploadedFile`` instance ready to be saved.
Below is the the instruction that describes the task: ### Input: :param image: a python PIL ``Image`` instance. :param file_name: The file name of the image. :returns: A django ``SimpleUploadedFile`` instance ready to be saved. ### Response: def _get_simple_uploaded_file(self, image, file_name): """ :param image: a python PIL ``Image`` instance. :param file_name: The file name of the image. :returns: A django ``SimpleUploadedFile`` instance ready to be saved. """ extension = os.path.splitext(file_name)[1] mimetype, encoding = mimetypes.guess_type(file_name) content_type = mimetype or 'image/png' temp_handle = BytesIO() image.save(temp_handle, self._get_pil_format(extension)) temp_handle.seek(0) # rewind the file suf = SimpleUploadedFile( file_name, temp_handle.read(), content_type=content_type, ) return suf
def import_module(module_name): """Helper function to import module""" import sys, os import importlib sys.path.append(os.path.dirname(__file__)) return importlib.import_module(module_name)
Helper function to import module
Below is the the instruction that describes the task: ### Input: Helper function to import module ### Response: def import_module(module_name): """Helper function to import module""" import sys, os import importlib sys.path.append(os.path.dirname(__file__)) return importlib.import_module(module_name)
def win_menu_select_item(title, *items, **kwargs): """ Usage: win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)") :param title: :param text: :param items: :return: """ text = kwargs.get("text", "") if not (0 < len(items) < 8): raise ValueError("accepted none item or number of items exceed eight") f_items = [LPCWSTR(item) for item in items] for i in xrange(8 - len(f_items)): f_items.append(LPCWSTR("")) ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text), *f_items) return ret
Usage: win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)") :param title: :param text: :param items: :return:
Below is the the instruction that describes the task: ### Input: Usage: win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)") :param title: :param text: :param items: :return: ### Response: def win_menu_select_item(title, *items, **kwargs): """ Usage: win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)") :param title: :param text: :param items: :return: """ text = kwargs.get("text", "") if not (0 < len(items) < 8): raise ValueError("accepted none item or number of items exceed eight") f_items = [LPCWSTR(item) for item in items] for i in xrange(8 - len(f_items)): f_items.append(LPCWSTR("")) ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text), *f_items) return ret
def extract_features(self, dataset, missing_value_action='auto'): """ For each example in the dataset, extract the leaf indices of each tree as features. For multiclass classification, each leaf index contains #num_class numbers. The returned feature vectors can be used as input to train another supervised learning model such as a :py:class:`~turicreate.logistic_classifier.LogisticClassifier`, an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose a model dependent missing value policy. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'none': Treat missing value as is. Model must be able to handle missing value. - 'error' : Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray An SArray of dtype array.array containing extracted features. Examples -------- >>> data = turicreate.SFrame( 'https://static.turi.com/datasets/regression/houses.csv') >>> # Regression Tree Models >>> data['regression_tree_features'] = model.extract_features(data) >>> # Classification Tree Models >>> data['classification_tree_features'] = model.extract_features(data) """ _raise_error_if_not_sframe(dataset, "dataset") if missing_value_action == 'auto': missing_value_action = select_default_missing_value_policy(self, 'extract_features') return self.__proxy__.extract_features(dataset, missing_value_action)
For each example in the dataset, extract the leaf indices of each tree as features. For multiclass classification, each leaf index contains #num_class numbers. The returned feature vectors can be used as input to train another supervised learning model such as a :py:class:`~turicreate.logistic_classifier.LogisticClassifier`, an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose a model dependent missing value policy. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'none': Treat missing value as is. Model must be able to handle missing value. - 'error' : Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray An SArray of dtype array.array containing extracted features. Examples -------- >>> data = turicreate.SFrame( 'https://static.turi.com/datasets/regression/houses.csv') >>> # Regression Tree Models >>> data['regression_tree_features'] = model.extract_features(data) >>> # Classification Tree Models >>> data['classification_tree_features'] = model.extract_features(data)
Below is the the instruction that describes the task: ### Input: For each example in the dataset, extract the leaf indices of each tree as features. For multiclass classification, each leaf index contains #num_class numbers. The returned feature vectors can be used as input to train another supervised learning model such as a :py:class:`~turicreate.logistic_classifier.LogisticClassifier`, an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose a model dependent missing value policy. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'none': Treat missing value as is. Model must be able to handle missing value. - 'error' : Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray An SArray of dtype array.array containing extracted features. Examples -------- >>> data = turicreate.SFrame( 'https://static.turi.com/datasets/regression/houses.csv') >>> # Regression Tree Models >>> data['regression_tree_features'] = model.extract_features(data) >>> # Classification Tree Models >>> data['classification_tree_features'] = model.extract_features(data) ### Response: def extract_features(self, dataset, missing_value_action='auto'): """ For each example in the dataset, extract the leaf indices of each tree as features. For multiclass classification, each leaf index contains #num_class numbers. 
The returned feature vectors can be used as input to train another supervised learning model such as a :py:class:`~turicreate.logistic_classifier.LogisticClassifier`, an :py:class:`~turicreate.svm_classifier.SVMClassifier`, or a Parameters ---------- dataset : SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose a model dependent missing value policy. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'none': Treat missing value as is. Model must be able to handle missing value. - 'error' : Do not proceed with prediction and terminate with an error message. Returns ------- out : SArray An SArray of dtype array.array containing extracted features. Examples -------- >>> data = turicreate.SFrame( 'https://static.turi.com/datasets/regression/houses.csv') >>> # Regression Tree Models >>> data['regression_tree_features'] = model.extract_features(data) >>> # Classification Tree Models >>> data['classification_tree_features'] = model.extract_features(data) """ _raise_error_if_not_sframe(dataset, "dataset") if missing_value_action == 'auto': missing_value_action = select_default_missing_value_policy(self, 'extract_features') return self.__proxy__.extract_features(dataset, missing_value_action)
def args_as_tuple(self): """FIXME: en fonction de la manière dont la commande a été générée (factory ou parser), le type des arguments est différent : string quand ça vient de la factory ou type normal depuis le parser. Il faut uniformiser tout ça !! """ value = self.arguments["header-names"] if isinstance(value, list): value = "[{}]".format( ",".join('"{}"'.format(item) for item in value)) if not value.startswith("["): return ('exists', value.strip('"')) return ("exists", ) + tuple(tools.to_list(value))
FIXME: en fonction de la manière dont la commande a été générée (factory ou parser), le type des arguments est différent : string quand ça vient de la factory ou type normal depuis le parser. Il faut uniformiser tout ça !!
Below is the the instruction that describes the task: ### Input: FIXME: en fonction de la manière dont la commande a été générée (factory ou parser), le type des arguments est différent : string quand ça vient de la factory ou type normal depuis le parser. Il faut uniformiser tout ça !! ### Response: def args_as_tuple(self): """FIXME: en fonction de la manière dont la commande a été générée (factory ou parser), le type des arguments est différent : string quand ça vient de la factory ou type normal depuis le parser. Il faut uniformiser tout ça !! """ value = self.arguments["header-names"] if isinstance(value, list): value = "[{}]".format( ",".join('"{}"'.format(item) for item in value)) if not value.startswith("["): return ('exists', value.strip('"')) return ("exists", ) + tuple(tools.to_list(value))
def trace_sync(self, data, timeout=5.0): """Send tracing data and wait for it to finish. This awaitable coroutine wraps VirtualIOTileDevice.trace() and turns the callback into an awaitable object. The appropriate usage of this method is by calling it inside the event loop as: await device.trace_sync(data) Args: data (bytes): The raw data that should be traced. timeout (float): The maximum number of seconds to wait before timing out. Returns: awaitable: An awaitable object with the result. The result will be True if the data was sent successfully or False if the data could not be sent in its entirety. When False is returned, there is no guarantee about how much of the data was sent, if any, just that it was not known to be successfully sent. """ done = AwaitableResponse() self.trace(data, callback=done.set_result) return done.wait(timeout)
Send tracing data and wait for it to finish. This awaitable coroutine wraps VirtualIOTileDevice.trace() and turns the callback into an awaitable object. The appropriate usage of this method is by calling it inside the event loop as: await device.trace_sync(data) Args: data (bytes): The raw data that should be traced. timeout (float): The maximum number of seconds to wait before timing out. Returns: awaitable: An awaitable object with the result. The result will be True if the data was sent successfully or False if the data could not be sent in its entirety. When False is returned, there is no guarantee about how much of the data was sent, if any, just that it was not known to be successfully sent.
Below is the the instruction that describes the task: ### Input: Send tracing data and wait for it to finish. This awaitable coroutine wraps VirtualIOTileDevice.trace() and turns the callback into an awaitable object. The appropriate usage of this method is by calling it inside the event loop as: await device.trace_sync(data) Args: data (bytes): The raw data that should be traced. timeout (float): The maximum number of seconds to wait before timing out. Returns: awaitable: An awaitable object with the result. The result will be True if the data was sent successfully or False if the data could not be sent in its entirety. When False is returned, there is no guarantee about how much of the data was sent, if any, just that it was not known to be successfully sent. ### Response: def trace_sync(self, data, timeout=5.0): """Send tracing data and wait for it to finish. This awaitable coroutine wraps VirtualIOTileDevice.trace() and turns the callback into an awaitable object. The appropriate usage of this method is by calling it inside the event loop as: await device.trace_sync(data) Args: data (bytes): The raw data that should be traced. timeout (float): The maximum number of seconds to wait before timing out. Returns: awaitable: An awaitable object with the result. The result will be True if the data was sent successfully or False if the data could not be sent in its entirety. When False is returned, there is no guarantee about how much of the data was sent, if any, just that it was not known to be successfully sent. """ done = AwaitableResponse() self.trace(data, callback=done.set_result) return done.wait(timeout)
def organisation_group_id(self): """ str: Organisation Group ID """ self._validate() self._validate_for_organisation_group_id() parts = [] parts.append(self.election_type) if self.subtype: parts.append(self.subtype) parts.append(self.organisation) parts.append(self.date) return ".".join(parts)
str: Organisation Group ID
Below is the the instruction that describes the task: ### Input: str: Organisation Group ID ### Response: def organisation_group_id(self): """ str: Organisation Group ID """ self._validate() self._validate_for_organisation_group_id() parts = [] parts.append(self.election_type) if self.subtype: parts.append(self.subtype) parts.append(self.organisation) parts.append(self.date) return ".".join(parts)
def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0]
Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. If None, it will be retrieved in the method
Below is the the instruction that describes the task: ### Input: Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. If None, it will be retrieved in the method ### Response: def get_host_cache(host_ref, host_cache_manager=None): ''' Returns a vim.HostScsiDisk if the host cache is configured on the specified host, other wise returns None host_ref The vim.HostSystem object representing the host that contains the requested disks. host_cache_manager The vim.HostCacheConfigurationManager object representing the cache configuration manager on the specified host. Default is None. If None, it will be retrieved in the method ''' hostname = get_managed_object_name(host_ref) service_instance = get_service_instance_from_managed_object(host_ref) log.trace('Retrieving the host cache on host \'%s\'', hostname) if not host_cache_manager: traversal_spec = vmodl.query.PropertyCollector.TraversalSpec( path='configManager.cacheConfigurationManager', type=vim.HostSystem, skip=False) results = get_mors_with_properties(service_instance, vim.HostCacheConfigurationManager, ['cacheConfigurationInfo'], container_ref=host_ref, traversal_spec=traversal_spec) if not results or not results[0].get('cacheConfigurationInfo'): log.trace('Host \'%s\' has no host cache', hostname) return None return results[0]['cacheConfigurationInfo'][0] else: results = get_properties_of_managed_object(host_cache_manager, ['cacheConfigurationInfo']) if not results: log.trace('Host \'%s\' has no host cache', hostname) return None return results['cacheConfigurationInfo'][0]
def is_atlas_enabled(blockstack_opts): """ Can we do atlas operations? """ if not blockstack_opts['atlas']: log.debug("Atlas is disabled") return False if 'zonefiles' not in blockstack_opts: log.debug("Atlas is disabled: no 'zonefiles' path set") return False if 'atlasdb_path' not in blockstack_opts: log.debug("Atlas is disabled: no 'atlasdb_path' path set") return False return True
Can we do atlas operations?
Below is the the instruction that describes the task: ### Input: Can we do atlas operations? ### Response: def is_atlas_enabled(blockstack_opts): """ Can we do atlas operations? """ if not blockstack_opts['atlas']: log.debug("Atlas is disabled") return False if 'zonefiles' not in blockstack_opts: log.debug("Atlas is disabled: no 'zonefiles' path set") return False if 'atlasdb_path' not in blockstack_opts: log.debug("Atlas is disabled: no 'atlasdb_path' path set") return False return True
def kompile(src, raw=False, filename='<compiler>', loader=None, **kwargs): ''' Creates a new class based on the supplied template, and returnsit. class Template(object): def __call__(self, context): return ''.join(self._iterator(context)) def _iterator(self, context): return map(str, self._root(context) def _root(self, context): yield '' yield ... yield from self.head(context) Blocks create new methods, and add a 'yield from self.{block}(context)' to the current function ''' parser = Parser(src, loader=loader) parser.load_library('knights.tags') parser.load_library('knights.helpers') parser.build_method('_root') if parser.parent: # Remove _root from the method list parser.methods = [ method for method in parser.methods if method.name != '_root' ] klass = parser.build_class() # Wrap it in a module inst = ast.Module(body=[klass]) ast.fix_missing_locations(inst) if kwargs.get('astor', False): import astor print(astor.to_source(inst)) # Compile code to create class code = compile(inst, filename=filename, mode='exec', optimize=2) # Execute it and return the instance g = { '_': Helpers(parser.helpers), 'parent': parser.parent, 'ContextScope': ContextScope, } eval(code, g) klass = g['Template'] if raw: return klass return klass()
Creates a new class based on the supplied template, and returnsit. class Template(object): def __call__(self, context): return ''.join(self._iterator(context)) def _iterator(self, context): return map(str, self._root(context) def _root(self, context): yield '' yield ... yield from self.head(context) Blocks create new methods, and add a 'yield from self.{block}(context)' to the current function
Below is the the instruction that describes the task: ### Input: Creates a new class based on the supplied template, and returnsit. class Template(object): def __call__(self, context): return ''.join(self._iterator(context)) def _iterator(self, context): return map(str, self._root(context) def _root(self, context): yield '' yield ... yield from self.head(context) Blocks create new methods, and add a 'yield from self.{block}(context)' to the current function ### Response: def kompile(src, raw=False, filename='<compiler>', loader=None, **kwargs): ''' Creates a new class based on the supplied template, and returnsit. class Template(object): def __call__(self, context): return ''.join(self._iterator(context)) def _iterator(self, context): return map(str, self._root(context) def _root(self, context): yield '' yield ... yield from self.head(context) Blocks create new methods, and add a 'yield from self.{block}(context)' to the current function ''' parser = Parser(src, loader=loader) parser.load_library('knights.tags') parser.load_library('knights.helpers') parser.build_method('_root') if parser.parent: # Remove _root from the method list parser.methods = [ method for method in parser.methods if method.name != '_root' ] klass = parser.build_class() # Wrap it in a module inst = ast.Module(body=[klass]) ast.fix_missing_locations(inst) if kwargs.get('astor', False): import astor print(astor.to_source(inst)) # Compile code to create class code = compile(inst, filename=filename, mode='exec', optimize=2) # Execute it and return the instance g = { '_': Helpers(parser.helpers), 'parent': parser.parent, 'ContextScope': ContextScope, } eval(code, g) klass = g['Template'] if raw: return klass return klass()
def sampleVRVT(self,R,n=1,nsigma=None,target=True): """ NAME: sampleVRVT PURPOSE: sample a radial and azimuthal velocity at R INPUT: R - Galactocentric distance (can be Quantity) n= number of distances to sample nsigma= number of sigma to rejection-sample on target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True) OUTPUT: list of samples BUGS: should use the fact that vR and vT separate HISTORY: 2011-03-24 - Written - Bovy (NYU) """ #Determine where the max of the v-distribution is using asymmetric drift maxVR= 0. maxVT= optimize.brentq(_vtmaxEq,0.,R**self._beta+0.2,(R,self)) maxVD= self(Orbit([R,maxVR,maxVT])) #Now rejection-sample if nsigma == None: nsigma= _NSIGMA out= [] if target: sigma= math.sqrt(self.targetSigma2(R,use_physical=False)) else: sigma= math.sqrt(self.sigma2(R,use_physical=False)) while len(out) < n: #sample vrg, vtg= nu.random.normal(), nu.random.normal() propvR= vrg*nsigma*sigma propvT= vtg*nsigma*sigma/self._gamma+maxVT VDatprop= self(Orbit([R,propvR,propvT])) if VDatprop/maxVD > nu.random.uniform()*nu.exp(-0.5*(vrg**2.+vtg**2.)): #accept out.append(sc.array([propvR,propvT])) return nu.array(out)
NAME: sampleVRVT PURPOSE: sample a radial and azimuthal velocity at R INPUT: R - Galactocentric distance (can be Quantity) n= number of distances to sample nsigma= number of sigma to rejection-sample on target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True) OUTPUT: list of samples BUGS: should use the fact that vR and vT separate HISTORY: 2011-03-24 - Written - Bovy (NYU)
Below is the the instruction that describes the task: ### Input: NAME: sampleVRVT PURPOSE: sample a radial and azimuthal velocity at R INPUT: R - Galactocentric distance (can be Quantity) n= number of distances to sample nsigma= number of sigma to rejection-sample on target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True) OUTPUT: list of samples BUGS: should use the fact that vR and vT separate HISTORY: 2011-03-24 - Written - Bovy (NYU) ### Response: def sampleVRVT(self,R,n=1,nsigma=None,target=True): """ NAME: sampleVRVT PURPOSE: sample a radial and azimuthal velocity at R INPUT: R - Galactocentric distance (can be Quantity) n= number of distances to sample nsigma= number of sigma to rejection-sample on target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True) OUTPUT: list of samples BUGS: should use the fact that vR and vT separate HISTORY: 2011-03-24 - Written - Bovy (NYU) """ #Determine where the max of the v-distribution is using asymmetric drift maxVR= 0. maxVT= optimize.brentq(_vtmaxEq,0.,R**self._beta+0.2,(R,self)) maxVD= self(Orbit([R,maxVR,maxVT])) #Now rejection-sample if nsigma == None: nsigma= _NSIGMA out= [] if target: sigma= math.sqrt(self.targetSigma2(R,use_physical=False)) else: sigma= math.sqrt(self.sigma2(R,use_physical=False)) while len(out) < n: #sample vrg, vtg= nu.random.normal(), nu.random.normal() propvR= vrg*nsigma*sigma propvT= vtg*nsigma*sigma/self._gamma+maxVT VDatprop= self(Orbit([R,propvR,propvT])) if VDatprop/maxVD > nu.random.uniform()*nu.exp(-0.5*(vrg**2.+vtg**2.)): #accept out.append(sc.array([propvR,propvT])) return nu.array(out)
def book(self, name): """Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS """ self._validate_order_book(name) return OrderBook(name, self._rest_client, self._logger)
Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS
Below is the the instruction that describes the task: ### Input: Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS ### Response: def book(self, name): """Return an API wrapper for the given order book. :param name: Order book name (e.g. "btc_cad"). :type name: str | unicode :return: Order book API wrapper. :rtype: quadriga.book.OrderBook :raise InvalidOrderBookError: If an invalid order book is given. **Example**: .. doctest:: >>> from quadriga import QuadrigaClient >>> >>> client = QuadrigaClient() >>> >>> eth = client.book('eth_cad').get_ticker() # doctest:+ELLIPSIS >>> btc = client.book('btc_cad').get_ticker() # doctest:+ELLIPSIS """ self._validate_order_book(name) return OrderBook(name, self._rest_client, self._logger)
def hyperparameter_ranges(self): """Return the hyperparameter ranges in a dictionary to be used as part of a request for creating a hyperparameter tuning job. """ hyperparameter_ranges = dict() for range_type in ParameterRange.__all_types__: parameter_ranges = [] for parameter_name, parameter in self._hyperparameter_ranges.items(): if parameter is not None and parameter.__name__ == range_type: # Categorical parameters needed to be serialized as JSON for our framework containers if isinstance(parameter, CategoricalParameter) and isinstance(self.estimator, Framework): tuning_range = parameter.as_json_range(parameter_name) else: tuning_range = parameter.as_tuning_range(parameter_name) parameter_ranges.append(tuning_range) hyperparameter_ranges[range_type + 'ParameterRanges'] = parameter_ranges return hyperparameter_ranges
Return the hyperparameter ranges in a dictionary to be used as part of a request for creating a hyperparameter tuning job.
Below is the the instruction that describes the task: ### Input: Return the hyperparameter ranges in a dictionary to be used as part of a request for creating a hyperparameter tuning job. ### Response: def hyperparameter_ranges(self): """Return the hyperparameter ranges in a dictionary to be used as part of a request for creating a hyperparameter tuning job. """ hyperparameter_ranges = dict() for range_type in ParameterRange.__all_types__: parameter_ranges = [] for parameter_name, parameter in self._hyperparameter_ranges.items(): if parameter is not None and parameter.__name__ == range_type: # Categorical parameters needed to be serialized as JSON for our framework containers if isinstance(parameter, CategoricalParameter) and isinstance(self.estimator, Framework): tuning_range = parameter.as_json_range(parameter_name) else: tuning_range = parameter.as_tuning_range(parameter_name) parameter_ranges.append(tuning_range) hyperparameter_ranges[range_type + 'ParameterRanges'] = parameter_ranges return hyperparameter_ranges
def refit(self, data, label, decay_rate=0.9, **kwargs): """Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. """ if self.__set_objective_to_none: raise LightGBMError('Cannot refit due to null objective function.') predictor = self._to_predictor(copy.deepcopy(kwargs)) leaf_preds = predictor.predict(data, -1, pred_leaf=True) nrow, ncol = leaf_preds.shape train_set = Dataset(data, label, silent=True) new_booster = Booster(self.params, train_set, silent=True) # Copy models _safe_call(_LIB.LGBM_BoosterMerge( new_booster.handle, predictor.handle)) leaf_preds = leaf_preds.reshape(-1) ptr_data, type_ptr_data, _ = c_int_array(leaf_preds) _safe_call(_LIB.LGBM_BoosterRefit( new_booster.handle, ptr_data, ctypes.c_int(nrow), ctypes.c_int(ncol))) new_booster.network = self.network new_booster.__attr = self.__attr.copy() return new_booster
Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster.
Below is the the instruction that describes the task: ### Input: Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. ### Response: def refit(self, data, label, decay_rate=0.9, **kwargs): """Refit the existing Booster by new data. Parameters ---------- data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse Data source for refit. If string, it represents the path to txt file. label : list, numpy 1-D array or pandas Series / one-column DataFrame Label for refit. decay_rate : float, optional (default=0.9) Decay rate of refit, will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees. **kwargs Other parameters for refit. These parameters will be passed to ``predict`` method. Returns ------- result : Booster Refitted Booster. 
""" if self.__set_objective_to_none: raise LightGBMError('Cannot refit due to null objective function.') predictor = self._to_predictor(copy.deepcopy(kwargs)) leaf_preds = predictor.predict(data, -1, pred_leaf=True) nrow, ncol = leaf_preds.shape train_set = Dataset(data, label, silent=True) new_booster = Booster(self.params, train_set, silent=True) # Copy models _safe_call(_LIB.LGBM_BoosterMerge( new_booster.handle, predictor.handle)) leaf_preds = leaf_preds.reshape(-1) ptr_data, type_ptr_data, _ = c_int_array(leaf_preds) _safe_call(_LIB.LGBM_BoosterRefit( new_booster.handle, ptr_data, ctypes.c_int(nrow), ctypes.c_int(ncol))) new_booster.network = self.network new_booster.__attr = self.__attr.copy() return new_booster
def expect(func, args, times=7, sleep_t=0.5): """try many times as in times with sleep time""" while times > 0: try: return func(*args) except Exception as e: times -= 1 logger.debug("expect failed - attempts left: %d" % times) time.sleep(sleep_t) if times == 0: raise exceptions.BaseExc(e)
try many times as in times with sleep time
Below is the the instruction that describes the task: ### Input: try many times as in times with sleep time ### Response: def expect(func, args, times=7, sleep_t=0.5): """try many times as in times with sleep time""" while times > 0: try: return func(*args) except Exception as e: times -= 1 logger.debug("expect failed - attempts left: %d" % times) time.sleep(sleep_t) if times == 0: raise exceptions.BaseExc(e)
def VxLANTunnelState_TunnelDestinationIpAddress(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") VxLANTunnelState = ET.SubElement(config, "VxLANTunnelState", xmlns="http://brocade.com/ns/brocade-notification-stream") TunnelDestinationIpAddress = ET.SubElement(VxLANTunnelState, "TunnelDestinationIpAddress") TunnelDestinationIpAddress.text = kwargs.pop('TunnelDestinationIpAddress') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def VxLANTunnelState_TunnelDestinationIpAddress(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") VxLANTunnelState = ET.SubElement(config, "VxLANTunnelState", xmlns="http://brocade.com/ns/brocade-notification-stream") TunnelDestinationIpAddress = ET.SubElement(VxLANTunnelState, "TunnelDestinationIpAddress") TunnelDestinationIpAddress.text = kwargs.pop('TunnelDestinationIpAddress') callback = kwargs.pop('callback', self._callback) return callback(config)
def GetPythonLibraryDirectoryPath(): """Retrieves the Python library directory path.""" path = sysconfig.get_python_lib(True) _, _, path = path.rpartition(sysconfig.PREFIX) if path.startswith(os.sep): path = path[1:] return path
Retrieves the Python library directory path.
Below is the the instruction that describes the task: ### Input: Retrieves the Python library directory path. ### Response: def GetPythonLibraryDirectoryPath(): """Retrieves the Python library directory path.""" path = sysconfig.get_python_lib(True) _, _, path = path.rpartition(sysconfig.PREFIX) if path.startswith(os.sep): path = path[1:] return path
def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.
Below is the the instruction that describes the task: ### Input: For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`. ### Response: def pfeedback(self, msg: str) -> None: """For printing nonessential feedback. Can be silenced with `quiet`. Inclusion in redirected output is controlled by `feedback_to_output`.""" if not self.quiet: if self.feedback_to_output: self.poutput(msg) else: self.decolorized_write(sys.stderr, "{}\n".format(msg))
def _CopyFromDateTimeValues(self, date_time_values): """Copies time elements from date and time values. Args: date_time_values (dict[str, int]): date and time values, such as year, month, day of month, hours, minutes, seconds, microseconds. Raises: ValueError: if no helper can be created for the current precision. """ year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = date_time_values.get('minutes', 0) seconds = date_time_values.get('seconds', 0) microseconds = date_time_values.get('microseconds', 0) precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper( self._precision) fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond( microseconds) self._normalized_timestamp = None self._number_of_seconds = self._GetNumberOfSecondsFromElements( year, month, day_of_month, hours, minutes, seconds) self._time_elements_tuple = ( year, month, day_of_month, hours, minutes, seconds) self.fraction_of_second = fraction_of_second self.is_local_time = False
Copies time elements from date and time values. Args: date_time_values (dict[str, int]): date and time values, such as year, month, day of month, hours, minutes, seconds, microseconds. Raises: ValueError: if no helper can be created for the current precision.
Below is the the instruction that describes the task: ### Input: Copies time elements from date and time values. Args: date_time_values (dict[str, int]): date and time values, such as year, month, day of month, hours, minutes, seconds, microseconds. Raises: ValueError: if no helper can be created for the current precision. ### Response: def _CopyFromDateTimeValues(self, date_time_values): """Copies time elements from date and time values. Args: date_time_values (dict[str, int]): date and time values, such as year, month, day of month, hours, minutes, seconds, microseconds. Raises: ValueError: if no helper can be created for the current precision. """ year = date_time_values.get('year', 0) month = date_time_values.get('month', 0) day_of_month = date_time_values.get('day_of_month', 0) hours = date_time_values.get('hours', 0) minutes = date_time_values.get('minutes', 0) seconds = date_time_values.get('seconds', 0) microseconds = date_time_values.get('microseconds', 0) precision_helper = precisions.PrecisionHelperFactory.CreatePrecisionHelper( self._precision) fraction_of_second = precision_helper.CopyMicrosecondsToFractionOfSecond( microseconds) self._normalized_timestamp = None self._number_of_seconds = self._GetNumberOfSecondsFromElements( year, month, day_of_month, hours, minutes, seconds) self._time_elements_tuple = ( year, month, day_of_month, hours, minutes, seconds) self.fraction_of_second = fraction_of_second self.is_local_time = False
def strip_figures(figure): """ Strips a figure into multiple figures with a trace on each of them Parameters: ----------- figure : Figure Plotly Figure """ fig=[] for trace in figure['data']: fig.append(dict(data=[trace],layout=figure['layout'])) return fig
Strips a figure into multiple figures with a trace on each of them Parameters: ----------- figure : Figure Plotly Figure
Below is the the instruction that describes the task: ### Input: Strips a figure into multiple figures with a trace on each of them Parameters: ----------- figure : Figure Plotly Figure ### Response: def strip_figures(figure): """ Strips a figure into multiple figures with a trace on each of them Parameters: ----------- figure : Figure Plotly Figure """ fig=[] for trace in figure['data']: fig.append(dict(data=[trace],layout=figure['layout'])) return fig
def loop(self): """ Inner loop for interactive mode. Do not call directly. """ while True: with self.setup_readline(): try: line = input(self.prompt) except EOFError: _vprinterr('^D') break except KeyboardInterrupt: _vprinterr('^C') continue if not line.strip(): continue try: cmd, args = self.cmd_split(line) except KeyError as e: _vprinterr('<red>Invalid command: %s</red>' % e) continue try: cmd(argv=args) except SessionExit: break except SystemExit as e: pass
Inner loop for interactive mode. Do not call directly.
Below is the the instruction that describes the task: ### Input: Inner loop for interactive mode. Do not call directly. ### Response: def loop(self): """ Inner loop for interactive mode. Do not call directly. """ while True: with self.setup_readline(): try: line = input(self.prompt) except EOFError: _vprinterr('^D') break except KeyboardInterrupt: _vprinterr('^C') continue if not line.strip(): continue try: cmd, args = self.cmd_split(line) except KeyError as e: _vprinterr('<red>Invalid command: %s</red>' % e) continue try: cmd(argv=args) except SessionExit: break except SystemExit as e: pass
def list_queues(self): ''' Enumerates the queues in the service namespace. ''' request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = '/$Resources/Queues' request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access request.headers = self._update_service_bus_header(request) response = self._perform_request(request) return _ETreeXmlToObject.convert_response_to_feeds( response, _convert_etree_element_to_queue)
Enumerates the queues in the service namespace.
Below is the the instruction that describes the task: ### Input: Enumerates the queues in the service namespace. ### Response: def list_queues(self): ''' Enumerates the queues in the service namespace. ''' request = HTTPRequest() request.method = 'GET' request.host = self._get_host() request.path = '/$Resources/Queues' request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access request.headers = self._update_service_bus_header(request) response = self._perform_request(request) return _ETreeXmlToObject.convert_response_to_feeds( response, _convert_etree_element_to_queue)
def shared(self, value, name=None): """ Create a shared theano scalar value. """ if type(value) == int: final_value = np.array(value, dtype="int32") elif type(value) == float: final_value = np.array(value, dtype=env.FLOATX) else: final_value = value return theano.shared(final_value, name=name)
Create a shared theano scalar value.
Below is the the instruction that describes the task: ### Input: Create a shared theano scalar value. ### Response: def shared(self, value, name=None): """ Create a shared theano scalar value. """ if type(value) == int: final_value = np.array(value, dtype="int32") elif type(value) == float: final_value = np.array(value, dtype=env.FLOATX) else: final_value = value return theano.shared(final_value, name=name)
def ltouches(self, span): """ Returns true if the end of this span touches the left (starting) side of the given span. """ if isinstance(span, list): return [sp for sp in span if self._ltouches(sp)] return self._ltouches(span)
Returns true if the end of this span touches the left (starting) side of the given span.
Below is the the instruction that describes the task: ### Input: Returns true if the end of this span touches the left (starting) side of the given span. ### Response: def ltouches(self, span): """ Returns true if the end of this span touches the left (starting) side of the given span. """ if isinstance(span, list): return [sp for sp in span if self._ltouches(sp)] return self._ltouches(span)
def _basic_cancel_notify(self, args): """Consumer cancelled by server. Most likely the queue was deleted. """ consumer_tag = args.read_shortstr() callback = self._on_cancel(consumer_tag) if callback: callback(consumer_tag) else: raise ConsumerCancelled(consumer_tag, (60, 30))
Consumer cancelled by server. Most likely the queue was deleted.
Below is the the instruction that describes the task: ### Input: Consumer cancelled by server. Most likely the queue was deleted. ### Response: def _basic_cancel_notify(self, args): """Consumer cancelled by server. Most likely the queue was deleted. """ consumer_tag = args.read_shortstr() callback = self._on_cancel(consumer_tag) if callback: callback(consumer_tag) else: raise ConsumerCancelled(consumer_tag, (60, 30))
def patch_storage_class(self, name, body, **kwargs): # noqa: E501 """patch_storage_class # noqa: E501 partially update the specified StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_storage_class_with_http_info(name, body, **kwargs) # noqa: E501 else: (data) = self.patch_storage_class_with_http_info(name, body, **kwargs) # noqa: E501 return data
patch_storage_class # noqa: E501 partially update the specified StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: patch_storage_class # noqa: E501 partially update the specified StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread. ### Response: def patch_storage_class(self, name, body, **kwargs): # noqa: E501 """patch_storage_class # noqa: E501 partially update the specified StorageClass # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_storage_class(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the StorageClass (required) :param UNKNOWN_BASE_TYPE body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1StorageClass If the method is called asynchronously, returns the request thread. 
""" kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.patch_storage_class_with_http_info(name, body, **kwargs) # noqa: E501 else: (data) = self.patch_storage_class_with_http_info(name, body, **kwargs) # noqa: E501 return data
def override_if_not_in_args(flag, argument, args): """Checks if flags is in args, and if not it adds the flag to args.""" if flag not in args: args.extend([flag, argument])
Checks if flags is in args, and if not it adds the flag to args.
Below is the the instruction that describes the task: ### Input: Checks if flags is in args, and if not it adds the flag to args. ### Response: def override_if_not_in_args(flag, argument, args): """Checks if flags is in args, and if not it adds the flag to args.""" if flag not in args: args.extend([flag, argument])
def docker_fabric(*args, **kwargs): """ :param args: Positional arguments to Docker client. :param kwargs: Keyword arguments to Docker client. :return: Docker client. :rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient """ ci = kwargs.get('client_implementation') or env.get('docker_fabric_implementation') or CLIENT_API if ci == CLIENT_API: return docker_api(*args, **kwargs) elif ci == CLIENT_CLI: return docker_cli(*args, **kwargs) raise ValueError("Invalid client implementation.", ci)
:param args: Positional arguments to Docker client. :param kwargs: Keyword arguments to Docker client. :return: Docker client. :rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient
Below is the the instruction that describes the task: ### Input: :param args: Positional arguments to Docker client. :param kwargs: Keyword arguments to Docker client. :return: Docker client. :rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient ### Response: def docker_fabric(*args, **kwargs): """ :param args: Positional arguments to Docker client. :param kwargs: Keyword arguments to Docker client. :return: Docker client. :rtype: dockerfabric.apiclient.DockerFabricClient | dockerfabric.cli.DockerCliClient """ ci = kwargs.get('client_implementation') or env.get('docker_fabric_implementation') or CLIENT_API if ci == CLIENT_API: return docker_api(*args, **kwargs) elif ci == CLIENT_CLI: return docker_cli(*args, **kwargs) raise ValueError("Invalid client implementation.", ci)