repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
kejbaly2/metrique
metrique/reporting.py
Report.add_chapter
def add_chapter(self, title): ''' Adds a new chapter to the report. :param str title: Title of the chapter. ''' chap_id = 'chap%s' % self.chap_counter self.chap_counter += 1 self.sidebar += '<a href="#%s" class="list-group-item">%s</a>\n' % ( chap_id, title) self.body += '<h1 id="%s">%s</h1>\n' % (chap_id, title)
python
def add_chapter(self, title): ''' Adds a new chapter to the report. :param str title: Title of the chapter. ''' chap_id = 'chap%s' % self.chap_counter self.chap_counter += 1 self.sidebar += '<a href="#%s" class="list-group-item">%s</a>\n' % ( chap_id, title) self.body += '<h1 id="%s">%s</h1>\n' % (chap_id, title)
[ "def", "add_chapter", "(", "self", ",", "title", ")", ":", "chap_id", "=", "'chap%s'", "%", "self", ".", "chap_counter", "self", ".", "chap_counter", "+=", "1", "self", ".", "sidebar", "+=", "'<a href=\"#%s\" class=\"list-group-item\">%s</a>\\n'", "%", "(", "cha...
Adds a new chapter to the report. :param str title: Title of the chapter.
[ "Adds", "a", "new", "chapter", "to", "the", "report", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/reporting.py#L59-L69
kejbaly2/metrique
metrique/reporting.py
Report.add_image
def add_image(self, figure, dpi=72): ''' Adds an image to the last chapter/section. The image will be stored in the `{self.title}_files` directory. :param matplotlib.figure figure: A matplotlib figure to be saved into the report ''' name = os.path.join(self._dir, '/fig%s.png' % self.fig_counter) self.fig_counter += 1 figure.savefig(name, dpi=dpi) plt.close(figure) self.body += '<img src="%s" />\n' % name
python
def add_image(self, figure, dpi=72): ''' Adds an image to the last chapter/section. The image will be stored in the `{self.title}_files` directory. :param matplotlib.figure figure: A matplotlib figure to be saved into the report ''' name = os.path.join(self._dir, '/fig%s.png' % self.fig_counter) self.fig_counter += 1 figure.savefig(name, dpi=dpi) plt.close(figure) self.body += '<img src="%s" />\n' % name
[ "def", "add_image", "(", "self", ",", "figure", ",", "dpi", "=", "72", ")", ":", "name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_dir", ",", "'/fig%s.png'", "%", "self", ".", "fig_counter", ")", "self", ".", "fig_counter", "+=", "1"...
Adds an image to the last chapter/section. The image will be stored in the `{self.title}_files` directory. :param matplotlib.figure figure: A matplotlib figure to be saved into the report
[ "Adds", "an", "image", "to", "the", "last", "chapter", "/", "section", ".", "The", "image", "will", "be", "stored", "in", "the", "{", "self", ".", "title", "}", "_files", "directory", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/reporting.py#L87-L99
kejbaly2/metrique
metrique/reporting.py
Report.write_report
def write_report(self, force=False): ''' Writes the report to a file. ''' path = self.title + '.html' value = self._template.format( title=self.title, body=self.body, sidebar=self.sidebar) write_file(path, value, force=force) plt.ion()
python
def write_report(self, force=False): ''' Writes the report to a file. ''' path = self.title + '.html' value = self._template.format( title=self.title, body=self.body, sidebar=self.sidebar) write_file(path, value, force=force) plt.ion()
[ "def", "write_report", "(", "self", ",", "force", "=", "False", ")", ":", "path", "=", "self", ".", "title", "+", "'.html'", "value", "=", "self", ".", "_template", ".", "format", "(", "title", "=", "self", ".", "title", ",", "body", "=", "self", "...
Writes the report to a file.
[ "Writes", "the", "report", "to", "a", "file", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/reporting.py#L101-L109
biocore/burrito-fillings
bfillings/fasttree_v1.py
build_tree_from_alignment
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None): """Returns a tree from alignment Will check MolType of aln object """ if params is None: params = {} if moltype == DNA or moltype == RNA: params['-nt'] = True elif moltype == PROTEIN: params['-nt'] = False else: raise ValueError, \ "FastTree does not support moltype: %s" % moltype.label app = FastTree(params=params) if best_tree: raise NotImplementedError, "best_tree not implemented yet" result = app(aln.toFasta()) tree = DndParser(result['Tree'].read(), constructor=PhyloNode) return tree
python
def build_tree_from_alignment(aln, moltype=DNA, best_tree=False, params=None): """Returns a tree from alignment Will check MolType of aln object """ if params is None: params = {} if moltype == DNA or moltype == RNA: params['-nt'] = True elif moltype == PROTEIN: params['-nt'] = False else: raise ValueError, \ "FastTree does not support moltype: %s" % moltype.label app = FastTree(params=params) if best_tree: raise NotImplementedError, "best_tree not implemented yet" result = app(aln.toFasta()) tree = DndParser(result['Tree'].read(), constructor=PhyloNode) return tree
[ "def", "build_tree_from_alignment", "(", "aln", ",", "moltype", "=", "DNA", ",", "best_tree", "=", "False", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "moltype", "==", "DNA", "or", "moltype",...
Returns a tree from alignment Will check MolType of aln object
[ "Returns", "a", "tree", "from", "alignment" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/fasttree_v1.py#L123-L145
castelao/oceansdb
oceansdb/utils.py
dbsource
def dbsource(dbname, var, resolution=None, tscale=None): """Return which file(s) to use according to dbname, var, etc """ db_cfg = {} cfg_dir = 'datasource' cfg_files = pkg_resources.resource_listdir('oceansdb', cfg_dir) cfg_files = [f for f in cfg_files if f[-5:] == '.json'] for src_cfg in cfg_files: text = pkg_resources.resource_string( 'oceansdb', os.path.join(cfg_dir, src_cfg)) text = text.decode('UTF-8', 'replace') cfg = json.loads(text) for c in cfg: assert c not in db_cfg, "Trying to overwrite %s" db_cfg[c] = cfg[c] dbpath = oceansdb_dir() datafiles = [] cfg = db_cfg[dbname] if (resolution is None): resolution = cfg['vars'][var]['default_resolution'] if (tscale is None): tscale = cfg['vars'][var][resolution]["default_tscale"] for c in cfg['vars'][var][resolution][tscale]: download_file(outputdir=dbpath, **c) if 'filename' in c: filename = os.path.join(dbpath, c['filename']) else: filename = os.path.join(dbpath, os.path.basename(urlparse(c['url']).path)) if 'varnames' in cfg['vars'][var][resolution]: datafiles.append(Dataset_flex(filename, aliases=cfg['vars'][var][resolution]['varnames'])) else: datafiles.append(Dataset_flex(filename)) return datafiles
python
def dbsource(dbname, var, resolution=None, tscale=None): """Return which file(s) to use according to dbname, var, etc """ db_cfg = {} cfg_dir = 'datasource' cfg_files = pkg_resources.resource_listdir('oceansdb', cfg_dir) cfg_files = [f for f in cfg_files if f[-5:] == '.json'] for src_cfg in cfg_files: text = pkg_resources.resource_string( 'oceansdb', os.path.join(cfg_dir, src_cfg)) text = text.decode('UTF-8', 'replace') cfg = json.loads(text) for c in cfg: assert c not in db_cfg, "Trying to overwrite %s" db_cfg[c] = cfg[c] dbpath = oceansdb_dir() datafiles = [] cfg = db_cfg[dbname] if (resolution is None): resolution = cfg['vars'][var]['default_resolution'] if (tscale is None): tscale = cfg['vars'][var][resolution]["default_tscale"] for c in cfg['vars'][var][resolution][tscale]: download_file(outputdir=dbpath, **c) if 'filename' in c: filename = os.path.join(dbpath, c['filename']) else: filename = os.path.join(dbpath, os.path.basename(urlparse(c['url']).path)) if 'varnames' in cfg['vars'][var][resolution]: datafiles.append(Dataset_flex(filename, aliases=cfg['vars'][var][resolution]['varnames'])) else: datafiles.append(Dataset_flex(filename)) return datafiles
[ "def", "dbsource", "(", "dbname", ",", "var", ",", "resolution", "=", "None", ",", "tscale", "=", "None", ")", ":", "db_cfg", "=", "{", "}", "cfg_dir", "=", "'datasource'", "cfg_files", "=", "pkg_resources", ".", "resource_listdir", "(", "'oceansdb'", ",",...
Return which file(s) to use according to dbname, var, etc
[ "Return", "which", "file", "(", "s", ")", "to", "use", "according", "to", "dbname", "var", "etc" ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/utils.py#L61-L102
halfak/deltas
deltas/algorithms/sequence_matcher.py
diff
def diff(a, b): """ Performs a longest common substring diff. :Parameters: a : sequence of `comparable` Initial sequence b : sequence of `comparable` Changed sequence :Returns: An `iterable` of operations. """ a, b = list(a), list(b) opcodes = SM(None, a, b).get_opcodes() return parse_opcodes(opcodes)
python
def diff(a, b): """ Performs a longest common substring diff. :Parameters: a : sequence of `comparable` Initial sequence b : sequence of `comparable` Changed sequence :Returns: An `iterable` of operations. """ a, b = list(a), list(b) opcodes = SM(None, a, b).get_opcodes() return parse_opcodes(opcodes)
[ "def", "diff", "(", "a", ",", "b", ")", ":", "a", ",", "b", "=", "list", "(", "a", ")", ",", "list", "(", "b", ")", "opcodes", "=", "SM", "(", "None", ",", "a", ",", "b", ")", ".", "get_opcodes", "(", ")", "return", "parse_opcodes", "(", "o...
Performs a longest common substring diff. :Parameters: a : sequence of `comparable` Initial sequence b : sequence of `comparable` Changed sequence :Returns: An `iterable` of operations.
[ "Performs", "a", "longest", "common", "substring", "diff", "." ]
train
https://github.com/halfak/deltas/blob/4173f4215b93426a877f4bb4a7a3547834e60ac3/deltas/algorithms/sequence_matcher.py#L33-L48
michaelpb/omnic
omnic/conversion/utils.py
convert_endpoint
async def convert_endpoint(url_string, ts, is_just_checking): ''' Main logic for HTTP endpoint. ''' response = singletons.server.response # Prep ForeignResource and ensure does not validate security settings singletons.settings foreign_res = ForeignResource(url_string) target_ts = TypeString(ts) target_resource = TypedResource(url_string, target_ts) # Send back cache if it exists if target_resource.cache_exists(): if is_just_checking: return _just_checking_response(True, target_resource) return await response.file(target_resource.cache_path, headers={ 'Content-Type': target_ts.mimetype, }) # Check if already downloaded. If not, queue up download. if not foreign_res.cache_exists(): singletons.workers.enqueue_download(foreign_res) # Queue up a single function that will in turn queue up conversion # process singletons.workers.enqueue_sync( enqueue_conversion_path, url_string, str(target_ts), singletons.workers.enqueue_convert ) if is_just_checking: return _just_checking_response(False, target_resource) # Respond with placeholder return singletons.placeholders.stream_response(target_ts, response)
python
async def convert_endpoint(url_string, ts, is_just_checking): ''' Main logic for HTTP endpoint. ''' response = singletons.server.response # Prep ForeignResource and ensure does not validate security settings singletons.settings foreign_res = ForeignResource(url_string) target_ts = TypeString(ts) target_resource = TypedResource(url_string, target_ts) # Send back cache if it exists if target_resource.cache_exists(): if is_just_checking: return _just_checking_response(True, target_resource) return await response.file(target_resource.cache_path, headers={ 'Content-Type': target_ts.mimetype, }) # Check if already downloaded. If not, queue up download. if not foreign_res.cache_exists(): singletons.workers.enqueue_download(foreign_res) # Queue up a single function that will in turn queue up conversion # process singletons.workers.enqueue_sync( enqueue_conversion_path, url_string, str(target_ts), singletons.workers.enqueue_convert ) if is_just_checking: return _just_checking_response(False, target_resource) # Respond with placeholder return singletons.placeholders.stream_response(target_ts, response)
[ "async", "def", "convert_endpoint", "(", "url_string", ",", "ts", ",", "is_just_checking", ")", ":", "response", "=", "singletons", ".", "server", ".", "response", "# Prep ForeignResource and ensure does not validate security settings", "singletons", ".", "settings", "for...
Main logic for HTTP endpoint.
[ "Main", "logic", "for", "HTTP", "endpoint", "." ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/utils.py#L15-L53
michaelpb/omnic
omnic/conversion/utils.py
apply_command_list_template
def apply_command_list_template(command_list, in_path, out_path, args): ''' Perform necessary substitutions on a command list to create a CLI-ready list to launch a conversion or download process via system binary. ''' replacements = { '$IN': in_path, '$OUT': out_path, } # Add in positional arguments ($0, $1, etc) for i, arg in enumerate(args): replacements['$' + str(i)] = arg results = [replacements.get(arg, arg) for arg in command_list] # Returns list of truthy replaced arguments in command return [item for item in results if item]
python
def apply_command_list_template(command_list, in_path, out_path, args): ''' Perform necessary substitutions on a command list to create a CLI-ready list to launch a conversion or download process via system binary. ''' replacements = { '$IN': in_path, '$OUT': out_path, } # Add in positional arguments ($0, $1, etc) for i, arg in enumerate(args): replacements['$' + str(i)] = arg results = [replacements.get(arg, arg) for arg in command_list] # Returns list of truthy replaced arguments in command return [item for item in results if item]
[ "def", "apply_command_list_template", "(", "command_list", ",", "in_path", ",", "out_path", ",", "args", ")", ":", "replacements", "=", "{", "'$IN'", ":", "in_path", ",", "'$OUT'", ":", "out_path", ",", "}", "# Add in positional arguments ($0, $1, etc)", "for", "i...
Perform necessary substitutions on a command list to create a CLI-ready list to launch a conversion or download process via system binary.
[ "Perform", "necessary", "substitutions", "on", "a", "command", "list", "to", "create", "a", "CLI", "-", "ready", "list", "to", "launch", "a", "conversion", "or", "download", "process", "via", "system", "binary", "." ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/utils.py#L55-L72
michaelpb/omnic
omnic/conversion/utils.py
convert_local
async def convert_local(path, to_type): ''' Given an absolute path to a local file, convert to a given to_type ''' # Now find path between types typed_foreign_res = TypedLocalResource(path) original_ts = typed_foreign_res.typestring conversion_path = singletons.converter_graph.find_path( original_ts, to_type) # print('Conversion path: ', conversion_path) # Loop through each step in graph path and convert for is_first, is_last, path_step in first_last_iterator(conversion_path): converter_class, from_ts, to_ts = path_step converter = converter_class() in_resource = TypedLocalResource(path, from_ts) if is_first: # Ensure first resource is just the source one in_resource = typed_foreign_res out_resource = TypedLocalResource(path, to_ts) if is_last: out_resource = TypedPathedLocalResource(path, to_ts) await converter.convert(in_resource, out_resource)
python
async def convert_local(path, to_type): ''' Given an absolute path to a local file, convert to a given to_type ''' # Now find path between types typed_foreign_res = TypedLocalResource(path) original_ts = typed_foreign_res.typestring conversion_path = singletons.converter_graph.find_path( original_ts, to_type) # print('Conversion path: ', conversion_path) # Loop through each step in graph path and convert for is_first, is_last, path_step in first_last_iterator(conversion_path): converter_class, from_ts, to_ts = path_step converter = converter_class() in_resource = TypedLocalResource(path, from_ts) if is_first: # Ensure first resource is just the source one in_resource = typed_foreign_res out_resource = TypedLocalResource(path, to_ts) if is_last: out_resource = TypedPathedLocalResource(path, to_ts) await converter.convert(in_resource, out_resource)
[ "async", "def", "convert_local", "(", "path", ",", "to_type", ")", ":", "# Now find path between types", "typed_foreign_res", "=", "TypedLocalResource", "(", "path", ")", "original_ts", "=", "typed_foreign_res", ".", "typestring", "conversion_path", "=", "singletons", ...
Given an absolute path to a local file, convert to a given to_type
[ "Given", "an", "absolute", "path", "to", "a", "local", "file", "convert", "to", "a", "given", "to_type" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/utils.py#L76-L98
michaelpb/omnic
omnic/conversion/utils.py
enqueue_conversion_path
def enqueue_conversion_path(url_string, to_type, enqueue_convert): ''' Given a URL string that has already been downloaded, enqueue necessary conversion to get to target type ''' target_ts = TypeString(to_type) foreign_res = ForeignResource(url_string) # Determine the file type of the foreign resource typed_foreign_res = foreign_res.guess_typed() if not typed_foreign_res.cache_exists(): # Symlink to new location that includes typed extension typed_foreign_res.symlink_from(foreign_res) # Now find path between types original_ts = typed_foreign_res.typestring path = singletons.converter_graph.find_path(original_ts, target_ts) # Loop through each step in graph path and convert is_first = True for converter_class, from_ts, to_ts in path: converter = converter_class() in_resource = TypedResource(url_string, from_ts) if is_first: # Ensure first resource is just the source one in_resource = TypedForeignResource(url_string, from_ts) out_resource = TypedResource(url_string, to_ts) enqueue_convert(converter, in_resource, out_resource) is_first = False
python
def enqueue_conversion_path(url_string, to_type, enqueue_convert): ''' Given a URL string that has already been downloaded, enqueue necessary conversion to get to target type ''' target_ts = TypeString(to_type) foreign_res = ForeignResource(url_string) # Determine the file type of the foreign resource typed_foreign_res = foreign_res.guess_typed() if not typed_foreign_res.cache_exists(): # Symlink to new location that includes typed extension typed_foreign_res.symlink_from(foreign_res) # Now find path between types original_ts = typed_foreign_res.typestring path = singletons.converter_graph.find_path(original_ts, target_ts) # Loop through each step in graph path and convert is_first = True for converter_class, from_ts, to_ts in path: converter = converter_class() in_resource = TypedResource(url_string, from_ts) if is_first: # Ensure first resource is just the source one in_resource = TypedForeignResource(url_string, from_ts) out_resource = TypedResource(url_string, to_ts) enqueue_convert(converter, in_resource, out_resource) is_first = False
[ "def", "enqueue_conversion_path", "(", "url_string", ",", "to_type", ",", "enqueue_convert", ")", ":", "target_ts", "=", "TypeString", "(", "to_type", ")", "foreign_res", "=", "ForeignResource", "(", "url_string", ")", "# Determine the file type of the foreign resource", ...
Given a URL string that has already been downloaded, enqueue necessary conversion to get to target type
[ "Given", "a", "URL", "string", "that", "has", "already", "been", "downloaded", "enqueue", "necessary", "conversion", "to", "get", "to", "target", "type" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/utils.py#L101-L129
dailymuse/oz
oz/core/actions.py
check_path
def check_path(path, otherwise): """ Checks if a path exists. If it does, print a warning message; if not, execute the `otherwise` callback argument. """ if os.path.exists(path): print("WARNING: Path '%s' already exists; skipping" % path) else: otherwise(path)
python
def check_path(path, otherwise): """ Checks if a path exists. If it does, print a warning message; if not, execute the `otherwise` callback argument. """ if os.path.exists(path): print("WARNING: Path '%s' already exists; skipping" % path) else: otherwise(path)
[ "def", "check_path", "(", "path", ",", "otherwise", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "print", "(", "\"WARNING: Path '%s' already exists; skipping\"", "%", "path", ")", "else", ":", "otherwise", "(", "path", ")" ]
Checks if a path exists. If it does, print a warning message; if not, execute the `otherwise` callback argument.
[ "Checks", "if", "a", "path", "exists", ".", "If", "it", "does", "print", "a", "warning", "message", ";", "if", "not", "execute", "the", "otherwise", "callback", "argument", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L25-L34
dailymuse/oz
oz/core/actions.py
config_maker
def config_maker(project_name, path): """Creates a config file based on the project name""" with open(skeleton_path("config.py"), "r") as config_source: config_content = config_source.read() config_content = config_content.replace("__PROJECT_NAME__", project_name) with open(path, "w") as config_dest: config_dest.write(config_content)
python
def config_maker(project_name, path): """Creates a config file based on the project name""" with open(skeleton_path("config.py"), "r") as config_source: config_content = config_source.read() config_content = config_content.replace("__PROJECT_NAME__", project_name) with open(path, "w") as config_dest: config_dest.write(config_content)
[ "def", "config_maker", "(", "project_name", ",", "path", ")", ":", "with", "open", "(", "skeleton_path", "(", "\"config.py\"", ")", ",", "\"r\"", ")", "as", "config_source", ":", "config_content", "=", "config_source", ".", "read", "(", ")", "config_content", ...
Creates a config file based on the project name
[ "Creates", "a", "config", "file", "based", "on", "the", "project", "name" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L36-L45
dailymuse/oz
oz/core/actions.py
skeleton_path
def skeleton_path(parts): """Gets the path to a skeleton asset""" return os.path.join(os.path.dirname(oz.__file__), "skeleton", parts)
python
def skeleton_path(parts): """Gets the path to a skeleton asset""" return os.path.join(os.path.dirname(oz.__file__), "skeleton", parts)
[ "def", "skeleton_path", "(", "parts", ")", ":", "return", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "oz", ".", "__file__", ")", ",", "\"skeleton\"", ",", "parts", ")" ]
Gets the path to a skeleton asset
[ "Gets", "the", "path", "to", "a", "skeleton", "asset" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L47-L49
dailymuse/oz
oz/core/actions.py
init
def init(project_name): """Creates a new project""" if not VALID_PROJECT_NAME.match(project_name): print("Invalid project name. It may only contain letters, numbers and underscores.", file=sys.stderr) return check_path(project_name, functools.partial(shutil.copytree, skeleton_path("plugin"))) check_path("static", os.mkdir) check_path("templates", os.mkdir) check_path("config.py", functools.partial(config_maker, project_name))
python
def init(project_name): """Creates a new project""" if not VALID_PROJECT_NAME.match(project_name): print("Invalid project name. It may only contain letters, numbers and underscores.", file=sys.stderr) return check_path(project_name, functools.partial(shutil.copytree, skeleton_path("plugin"))) check_path("static", os.mkdir) check_path("templates", os.mkdir) check_path("config.py", functools.partial(config_maker, project_name))
[ "def", "init", "(", "project_name", ")", ":", "if", "not", "VALID_PROJECT_NAME", ".", "match", "(", "project_name", ")", ":", "print", "(", "\"Invalid project name. It may only contain letters, numbers and underscores.\"", ",", "file", "=", "sys", ".", "stderr", ")", ...
Creates a new project
[ "Creates", "a", "new", "project" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L52-L62
dailymuse/oz
oz/core/actions.py
server
def server(): """Runs the server""" tornado.log.enable_pretty_logging() # Get and validate the server_type server_type = oz.settings["server_type"] if server_type not in [None, "wsgi", "asyncio", "twisted"]: raise Exception("Unknown server type: %s" % server_type) # Install the correct ioloop if necessary if server_type == "asyncio": from tornado.platform.asyncio import AsyncIOMainLoop AsyncIOMainLoop().install() elif server_type == "twisted": from tornado.platform.twisted import TwistedIOLoop TwistedIOLoop().install() if server_type == "wsgi": wsgi_app = tornado.wsgi.WSGIApplication(oz._routes, **oz.settings) wsgi_srv = wsgiref.simple_server.make_server("", oz.settings["port"], wsgi_app) wsgi_srv.serve_forever() else: web_app = tornado.web.Application(oz._routes, **oz.settings) if oz.settings["ssl_cert_file"] != None and oz.settings["ssl_key_file"] != None: ssl_options = { "certfile": oz.settings["ssl_cert_file"], "keyfile": oz.settings["ssl_key_file"], "cert_reqs": oz.settings["ssl_cert_reqs"], "ca_certs": oz.settings["ssl_ca_certs"] } else: ssl_options = None http_srv = tornado.httpserver.HTTPServer( web_app, ssl_options=ssl_options, body_timeout=oz.settings["body_timeout"], xheaders=oz.settings["xheaders"] ) http_srv.bind(oz.settings["port"]) server_workers = oz.settings["server_workers"] if server_workers > 1: if oz.settings["debug"]: print("WARNING: Debug is enabled, but multiple server workers have been configured. Only one server worker can run in debug mode.") server_workers = 1 elif (server_type == "asyncio" or server_type == "twisted"): print("WARNING: A non-default server type is being used, but multiple server workers have been configured. 
Only one server worker can run on a non-default server type.") server_workers = 1 # Forks multiple sub-processes if server_workers > 1 http_srv.start(server_workers) # Registers signal handles for graceful server shutdown if oz.settings.get("use_graceful_shutdown"): if server_type == "asyncio" or server_type == "twisted": print("WARNING: Cannot enable graceful shutdown for asyncio or twisted server types.") else: # NOTE: Do not expect any logging to with certain tools (e.g., invoker), # because they may quiet logs on SIGINT/SIGTERM signal.signal(signal.SIGTERM, functools.partial(_shutdown_tornado_ioloop, http_srv)) signal.signal(signal.SIGINT, functools.partial(_shutdown_tornado_ioloop, http_srv)) # Starts the ioloops if server_type == "asyncio": import asyncio asyncio.get_event_loop().run_forever() elif server_type == "twisted": from twisted.internet import reactor reactor.run() else: from tornado import ioloop ioloop.IOLoop.instance().start()
python
def server(): """Runs the server""" tornado.log.enable_pretty_logging() # Get and validate the server_type server_type = oz.settings["server_type"] if server_type not in [None, "wsgi", "asyncio", "twisted"]: raise Exception("Unknown server type: %s" % server_type) # Install the correct ioloop if necessary if server_type == "asyncio": from tornado.platform.asyncio import AsyncIOMainLoop AsyncIOMainLoop().install() elif server_type == "twisted": from tornado.platform.twisted import TwistedIOLoop TwistedIOLoop().install() if server_type == "wsgi": wsgi_app = tornado.wsgi.WSGIApplication(oz._routes, **oz.settings) wsgi_srv = wsgiref.simple_server.make_server("", oz.settings["port"], wsgi_app) wsgi_srv.serve_forever() else: web_app = tornado.web.Application(oz._routes, **oz.settings) if oz.settings["ssl_cert_file"] != None and oz.settings["ssl_key_file"] != None: ssl_options = { "certfile": oz.settings["ssl_cert_file"], "keyfile": oz.settings["ssl_key_file"], "cert_reqs": oz.settings["ssl_cert_reqs"], "ca_certs": oz.settings["ssl_ca_certs"] } else: ssl_options = None http_srv = tornado.httpserver.HTTPServer( web_app, ssl_options=ssl_options, body_timeout=oz.settings["body_timeout"], xheaders=oz.settings["xheaders"] ) http_srv.bind(oz.settings["port"]) server_workers = oz.settings["server_workers"] if server_workers > 1: if oz.settings["debug"]: print("WARNING: Debug is enabled, but multiple server workers have been configured. Only one server worker can run in debug mode.") server_workers = 1 elif (server_type == "asyncio" or server_type == "twisted"): print("WARNING: A non-default server type is being used, but multiple server workers have been configured. 
Only one server worker can run on a non-default server type.") server_workers = 1 # Forks multiple sub-processes if server_workers > 1 http_srv.start(server_workers) # Registers signal handles for graceful server shutdown if oz.settings.get("use_graceful_shutdown"): if server_type == "asyncio" or server_type == "twisted": print("WARNING: Cannot enable graceful shutdown for asyncio or twisted server types.") else: # NOTE: Do not expect any logging to with certain tools (e.g., invoker), # because they may quiet logs on SIGINT/SIGTERM signal.signal(signal.SIGTERM, functools.partial(_shutdown_tornado_ioloop, http_srv)) signal.signal(signal.SIGINT, functools.partial(_shutdown_tornado_ioloop, http_srv)) # Starts the ioloops if server_type == "asyncio": import asyncio asyncio.get_event_loop().run_forever() elif server_type == "twisted": from twisted.internet import reactor reactor.run() else: from tornado import ioloop ioloop.IOLoop.instance().start()
[ "def", "server", "(", ")", ":", "tornado", ".", "log", ".", "enable_pretty_logging", "(", ")", "# Get and validate the server_type", "server_type", "=", "oz", ".", "settings", "[", "\"server_type\"", "]", "if", "server_type", "not", "in", "[", "None", ",", "\"...
Runs the server
[ "Runs", "the", "server" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L65-L141
dailymuse/oz
oz/core/actions.py
repl
def repl(): """Runs an IPython repl with some context""" try: import IPython except: print("ERROR: IPython is not installed. Please install it to use the repl.", file=sys.stderr) raise IPython.embed(user_ns=dict( settings=oz.settings, actions=oz._actions, uimodules=oz._uimodules, routes=oz._routes, ))
python
def repl(): """Runs an IPython repl with some context""" try: import IPython except: print("ERROR: IPython is not installed. Please install it to use the repl.", file=sys.stderr) raise IPython.embed(user_ns=dict( settings=oz.settings, actions=oz._actions, uimodules=oz._uimodules, routes=oz._routes, ))
[ "def", "repl", "(", ")", ":", "try", ":", "import", "IPython", "except", ":", "print", "(", "\"ERROR: IPython is not installed. Please install it to use the repl.\"", ",", "file", "=", "sys", ".", "stderr", ")", "raise", "IPython", ".", "embed", "(", "user_ns", ...
Runs an IPython repl with some context
[ "Runs", "an", "IPython", "repl", "with", "some", "context" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/core/actions.py#L144-L158
michaelpb/omnic
omnic/conversion/resolvergraph.py
ResolverGraph.find_resource_url_basename
def find_resource_url_basename(self, resource_url): ''' Figure out path basename for given resource_url ''' scheme = resource_url.parsed.scheme if scheme in ('http', 'https', 'file'): return _get_basename_based_on_url(resource_url) elif scheme in ('git', 'git+https', 'git+http'): if len(resource_url.args) == 2: # For now, git has 2 positional args, hash and path git_tree, subpath = resource_url.args basename = os.path.basename(subpath) if basename: return basename # subpath was not '/' or '' return _get_basename_based_on_url(resource_url)
python
def find_resource_url_basename(self, resource_url): ''' Figure out path basename for given resource_url ''' scheme = resource_url.parsed.scheme if scheme in ('http', 'https', 'file'): return _get_basename_based_on_url(resource_url) elif scheme in ('git', 'git+https', 'git+http'): if len(resource_url.args) == 2: # For now, git has 2 positional args, hash and path git_tree, subpath = resource_url.args basename = os.path.basename(subpath) if basename: return basename # subpath was not '/' or '' return _get_basename_based_on_url(resource_url)
[ "def", "find_resource_url_basename", "(", "self", ",", "resource_url", ")", ":", "scheme", "=", "resource_url", ".", "parsed", ".", "scheme", "if", "scheme", "in", "(", "'http'", ",", "'https'", ",", "'file'", ")", ":", "return", "_get_basename_based_on_url", ...
Figure out path basename for given resource_url
[ "Figure", "out", "path", "basename", "for", "given", "resource_url" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/resolvergraph.py#L27-L42
michaelpb/omnic
omnic/conversion/resolvergraph.py
ResolverGraph.find_destination_type
def find_destination_type(self, resource_url): ''' Given a resource_url, figure out what it would resolve into ''' resolvers = self.converters.values() for resolver in resolvers: # Not all resolvers are opinionated about destination types if not hasattr(resolver, 'get_destination_type'): continue destination_type = resolver.get_destination_type(resource_url) if destination_type: return destination_type
python
def find_destination_type(self, resource_url): ''' Given a resource_url, figure out what it would resolve into ''' resolvers = self.converters.values() for resolver in resolvers: # Not all resolvers are opinionated about destination types if not hasattr(resolver, 'get_destination_type'): continue destination_type = resolver.get_destination_type(resource_url) if destination_type: return destination_type
[ "def", "find_destination_type", "(", "self", ",", "resource_url", ")", ":", "resolvers", "=", "self", ".", "converters", ".", "values", "(", ")", "for", "resolver", "in", "resolvers", ":", "# Not all resolvers are opinionated about destination types", "if", "not", "...
Given a resource_url, figure out what it would resolve into
[ "Given", "a", "resource_url", "figure", "out", "what", "it", "would", "resolve", "into" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/resolvergraph.py#L44-L56
michaelpb/omnic
omnic/conversion/resolvergraph.py
ResolverGraph.download
async def download(self, resource_url): ''' Download given Resource URL by finding path through graph and applying each step ''' resolver_path = self.find_path_from_url(resource_url) await self.apply_resolver_path(resource_url, resolver_path)
python
async def download(self, resource_url): ''' Download given Resource URL by finding path through graph and applying each step ''' resolver_path = self.find_path_from_url(resource_url) await self.apply_resolver_path(resource_url, resolver_path)
[ "async", "def", "download", "(", "self", ",", "resource_url", ")", ":", "resolver_path", "=", "self", ".", "find_path_from_url", "(", "resource_url", ")", "await", "self", ".", "apply_resolver_path", "(", "resource_url", ",", "resolver_path", ")" ]
Download given Resource URL by finding path through graph and applying each step
[ "Download", "given", "Resource", "URL", "by", "finding", "path", "through", "graph", "and", "applying", "each", "step" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/resolvergraph.py#L76-L82
biocore/burrito-fillings
bfillings/parsinsert.py
insert_sequences_into_tree
def insert_sequences_into_tree(aln, moltype, params={}): """Returns a tree from placement of sequences """ # convert aln to phy since seq_names need fixed to run through parsinsert new_aln=get_align_for_phylip(StringIO(aln)) # convert aln to fasta in case it is not already a fasta file aln2 = Alignment(new_aln) seqs = aln2.toFasta() parsinsert_app = ParsInsert(params=params) result = parsinsert_app(seqs) # parse tree tree = DndParser(result['Tree'].read(), constructor=PhyloNode) # cleanup files result.cleanUp() return tree
python
def insert_sequences_into_tree(aln, moltype, params={}): """Returns a tree from placement of sequences """ # convert aln to phy since seq_names need fixed to run through parsinsert new_aln=get_align_for_phylip(StringIO(aln)) # convert aln to fasta in case it is not already a fasta file aln2 = Alignment(new_aln) seqs = aln2.toFasta() parsinsert_app = ParsInsert(params=params) result = parsinsert_app(seqs) # parse tree tree = DndParser(result['Tree'].read(), constructor=PhyloNode) # cleanup files result.cleanUp() return tree
[ "def", "insert_sequences_into_tree", "(", "aln", ",", "moltype", ",", "params", "=", "{", "}", ")", ":", "# convert aln to phy since seq_names need fixed to run through parsinsert", "new_aln", "=", "get_align_for_phylip", "(", "StringIO", "(", "aln", ")", ")", "# conver...
Returns a tree from placement of sequences
[ "Returns", "a", "tree", "from", "placement", "of", "sequences" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/parsinsert.py#L73-L92
biocore/burrito-fillings
bfillings/parsinsert.py
ParsInsert._get_result_paths
def _get_result_paths(self,data): """ Get the resulting tree""" result = {} result['Tree'] = ResultPath(Path=splitext(self._input_filename)[0] + \ '.tree') return result
python
def _get_result_paths(self,data): """ Get the resulting tree""" result = {} result['Tree'] = ResultPath(Path=splitext(self._input_filename)[0] + \ '.tree') return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "result", "[", "'Tree'", "]", "=", "ResultPath", "(", "Path", "=", "splitext", "(", "self", ".", "_input_filename", ")", "[", "0", "]", "+", "'.tree'", ")", "ret...
Get the resulting tree
[ "Get", "the", "resulting", "tree" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/parsinsert.py#L66-L71
michaelpb/omnic
omnic/conversion/resolver.py
download
async def download(resource_url): ''' Download given resource_url ''' scheme = resource_url.parsed.scheme if scheme in ('http', 'https'): await download_http(resource_url) elif scheme in ('git', 'git+https', 'git+http'): await download_git(resource_url) else: raise ValueError('Unknown URL scheme: "%s"' % scheme)
python
async def download(resource_url): ''' Download given resource_url ''' scheme = resource_url.parsed.scheme if scheme in ('http', 'https'): await download_http(resource_url) elif scheme in ('git', 'git+https', 'git+http'): await download_git(resource_url) else: raise ValueError('Unknown URL scheme: "%s"' % scheme)
[ "async", "def", "download", "(", "resource_url", ")", ":", "scheme", "=", "resource_url", ".", "parsed", ".", "scheme", "if", "scheme", "in", "(", "'http'", ",", "'https'", ")", ":", "await", "download_http", "(", "resource_url", ")", "elif", "scheme", "in...
Download given resource_url
[ "Download", "given", "resource_url" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/resolver.py#L110-L120
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.create_folder
def create_folder(self, dir_name): """创建目录(https://www.qcloud.com/document/product/436/6061) :param dir_name:要创建的目录的目录的名称 :return 返回True创建成功,返回False创建失败 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = "http://<Region>.file.myqcloud.com" + "/files/v2/<appid>/<bucket_name>/<dir_name>/" self.url = self.url.replace("<Region>", self.config.region).replace("<appid>", str(self.config.app_id)) self.url = str(self.url).replace("<bucket_name>", self.config.bucket).replace("<dir_name>", dir_name) self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30 ) response, content = self.http.request(uri=self.url, method='POST', body='{"op": "create", "biz_attr": ""}', headers=self.headers) if eval(content.decode('utf8')).get("code") == 0: return True else: return False
python
def create_folder(self, dir_name): """创建目录(https://www.qcloud.com/document/product/436/6061) :param dir_name:要创建的目录的目录的名称 :return 返回True创建成功,返回False创建失败 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = "http://<Region>.file.myqcloud.com" + "/files/v2/<appid>/<bucket_name>/<dir_name>/" self.url = self.url.replace("<Region>", self.config.region).replace("<appid>", str(self.config.app_id)) self.url = str(self.url).replace("<bucket_name>", self.config.bucket).replace("<dir_name>", dir_name) self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30 ) response, content = self.http.request(uri=self.url, method='POST', body='{"op": "create", "biz_attr": ""}', headers=self.headers) if eval(content.decode('utf8')).get("code") == 0: return True else: return False
[ "def", "create_folder", "(", "self", ",", "dir_name", ")", ":", "if", "dir_name", "[", "0", "]", "==", "'/'", ":", "dir_name", "=", "dir_name", "[", "1", ":", "len", "(", "dir_name", ")", "]", "self", ".", "url", "=", "\"http://<Region>.file.myqcloud.com...
创建目录(https://www.qcloud.com/document/product/436/6061) :param dir_name:要创建的目录的目录的名称 :return 返回True创建成功,返回False创建失败
[ "创建目录", "(", "https", ":", "//", "www", ".", "qcloud", ".", "com", "/", "document", "/", "product", "/", "436", "/", "6061", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L43-L59
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.list_folder
def list_folder(self, dir_name=None, prefix=None, num=1000, context=None): """列目录(https://www.qcloud.com/document/product/436/6062) :param dir_name:文件夹名称 :param prefix:前缀 :param num:查询的文件的数量,最大支持1000,默认查询数量为1000 :param context:翻页标志,将上次查询结果的context的字段传入,即可实现翻页的功能 :return 查询结果,为json格式 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = 'http://<Region>.file.myqcloud.com/files/v2/<appid>/<bucket_name>/' self.url = self.url.replace("<Region>", self.config.region).replace("<appid>", str(self.config.app_id)).replace("<bucket_name>", self.config.bucket) if dir_name is not None: self.url = self.url + str(dir_name) + "/" if prefix is not None: self.url = self.url + str(prefix) self.url = self.url + "?op=list&num=" + str(num) if context is not None: self.url = self.url + '&context=' + str(context) self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) response, content = self.http.request(uri=self.url, method='GET', headers=self.headers) return content.decode("utf8")
python
def list_folder(self, dir_name=None, prefix=None, num=1000, context=None): """列目录(https://www.qcloud.com/document/product/436/6062) :param dir_name:文件夹名称 :param prefix:前缀 :param num:查询的文件的数量,最大支持1000,默认查询数量为1000 :param context:翻页标志,将上次查询结果的context的字段传入,即可实现翻页的功能 :return 查询结果,为json格式 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = 'http://<Region>.file.myqcloud.com/files/v2/<appid>/<bucket_name>/' self.url = self.url.replace("<Region>", self.config.region).replace("<appid>", str(self.config.app_id)).replace("<bucket_name>", self.config.bucket) if dir_name is not None: self.url = self.url + str(dir_name) + "/" if prefix is not None: self.url = self.url + str(prefix) self.url = self.url + "?op=list&num=" + str(num) if context is not None: self.url = self.url + '&context=' + str(context) self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) response, content = self.http.request(uri=self.url, method='GET', headers=self.headers) return content.decode("utf8")
[ "def", "list_folder", "(", "self", ",", "dir_name", "=", "None", ",", "prefix", "=", "None", ",", "num", "=", "1000", ",", "context", "=", "None", ")", ":", "if", "dir_name", "[", "0", "]", "==", "'/'", ":", "dir_name", "=", "dir_name", "[", "1", ...
列目录(https://www.qcloud.com/document/product/436/6062) :param dir_name:文件夹名称 :param prefix:前缀 :param num:查询的文件的数量,最大支持1000,默认查询数量为1000 :param context:翻页标志,将上次查询结果的context的字段传入,即可实现翻页的功能 :return 查询结果,为json格式
[ "列目录", "(", "https", ":", "//", "www", ".", "qcloud", ".", "com", "/", "document", "/", "product", "/", "436", "/", "6062", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L62-L84
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.query_folder
def query_folder(self, dir_name): """查询目录属性(https://www.qcloud.com/document/product/436/6063) :param dir_name:查询的目录的名称 :return:查询出来的结果,为json格式 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = 'http://' + self.config.region + '.file.myqcloud.com' + '/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket + '/' + dir_name + '/?op=stat' self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) reponse, content = self.http.request(uri=self.url, method='GET',headers=self.headers) return content.decode("utf8")
python
def query_folder(self, dir_name): """查询目录属性(https://www.qcloud.com/document/product/436/6063) :param dir_name:查询的目录的名称 :return:查询出来的结果,为json格式 """ if dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] self.url = 'http://' + self.config.region + '.file.myqcloud.com' + '/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket + '/' + dir_name + '/?op=stat' self.headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) reponse, content = self.http.request(uri=self.url, method='GET',headers=self.headers) return content.decode("utf8")
[ "def", "query_folder", "(", "self", ",", "dir_name", ")", ":", "if", "dir_name", "[", "0", "]", "==", "'/'", ":", "dir_name", "=", "dir_name", "[", "1", ":", "len", "(", "dir_name", ")", "]", "self", ".", "url", "=", "'http://'", "+", "self", ".", ...
查询目录属性(https://www.qcloud.com/document/product/436/6063) :param dir_name:查询的目录的名称 :return:查询出来的结果,为json格式
[ "查询目录属性", "(", "https", ":", "//", "www", ".", "qcloud", ".", "com", "/", "document", "/", "product", "/", "436", "/", "6063", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L86-L97
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.upload_file
def upload_file(self, real_file_path, file_name, dir_name=None): """简单上传文件(https://www.qcloud.com/document/product/436/6066) :param real_file_path: 文件的物理地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串 """ if dir_name is not None and dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] if dir_name is None: dir_name = "" self.url = 'http://' + self.config.region + '.file.myqcloud.com/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket if dir_name is not None: self.url = self.url + '/' + dir_name self.url = self.url + '/' + file_name headers = {} headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) files = {'file': ('', open(real_file_path, 'rb'))} r = requests.post(url=self.url, data={'op': 'upload', 'biz_attr': '', 'insertOnly': '0'}, files={ 'filecontent': (real_file_path, open(real_file_path, 'rb'), 'application/octet-stream')}, headers=headers) return str(eval(r.content.decode('utf8')).get('data'))
python
def upload_file(self, real_file_path, file_name, dir_name=None): """简单上传文件(https://www.qcloud.com/document/product/436/6066) :param real_file_path: 文件的物理地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串 """ if dir_name is not None and dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] if dir_name is None: dir_name = "" self.url = 'http://' + self.config.region + '.file.myqcloud.com/files/v2/' + str(self.config.app_id) + '/' + self.config.bucket if dir_name is not None: self.url = self.url + '/' + dir_name self.url = self.url + '/' + file_name headers = {} headers['Authorization'] = CosAuth(self.config).sign_more(self.config.bucket, '', 30) files = {'file': ('', open(real_file_path, 'rb'))} r = requests.post(url=self.url, data={'op': 'upload', 'biz_attr': '', 'insertOnly': '0'}, files={ 'filecontent': (real_file_path, open(real_file_path, 'rb'), 'application/octet-stream')}, headers=headers) return str(eval(r.content.decode('utf8')).get('data'))
[ "def", "upload_file", "(", "self", ",", "real_file_path", ",", "file_name", ",", "dir_name", "=", "None", ")", ":", "if", "dir_name", "is", "not", "None", "and", "dir_name", "[", "0", "]", "==", "'/'", ":", "dir_name", "=", "dir_name", "[", "1", ":", ...
简单上传文件(https://www.qcloud.com/document/product/436/6066) :param real_file_path: 文件的物理地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串
[ "简单上传文件", "(", "https", ":", "//", "www", ".", "qcloud", ".", "com", "/", "document", "/", "product", "/", "436", "/", "6066", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L115-L137
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.upload_slice_file
def upload_slice_file(self, real_file_path, slice_size, file_name, offset=0, dir_name=None): """ 此分片上传代码由GitHub用户a270443177(https://github.com/a270443177)友情提供 :param real_file_path: :param slice_size: :param file_name: :param offset: :param dir_name: :return: """ if dir_name is not None and dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] if dir_name is None: dir_name = "" self.url = 'http://' + self.config.region + '.file.myqcloud.com/files/v2/' + str( self.config.app_id) + '/' + self.config.bucket if dir_name is not None: self.url = self.url + '/' + dir_name self.url = self.url + '/' + file_name file_size = os.path.getsize(real_file_path) session = self._upload_slice_control(file_size=file_size, slice_size=slice_size) with open(real_file_path, 'rb') as local_file: while offset < file_size: file_content = local_file.read(slice_size) self._upload_slice_data(filecontent=file_content, session=session, offset=offset) offset += slice_size r = self._upload_slice_finish(session=session, file_size=file_size) return r
python
def upload_slice_file(self, real_file_path, slice_size, file_name, offset=0, dir_name=None): """ 此分片上传代码由GitHub用户a270443177(https://github.com/a270443177)友情提供 :param real_file_path: :param slice_size: :param file_name: :param offset: :param dir_name: :return: """ if dir_name is not None and dir_name[0] == '/': dir_name = dir_name[1:len(dir_name)] if dir_name is None: dir_name = "" self.url = 'http://' + self.config.region + '.file.myqcloud.com/files/v2/' + str( self.config.app_id) + '/' + self.config.bucket if dir_name is not None: self.url = self.url + '/' + dir_name self.url = self.url + '/' + file_name file_size = os.path.getsize(real_file_path) session = self._upload_slice_control(file_size=file_size, slice_size=slice_size) with open(real_file_path, 'rb') as local_file: while offset < file_size: file_content = local_file.read(slice_size) self._upload_slice_data(filecontent=file_content, session=session, offset=offset) offset += slice_size r = self._upload_slice_finish(session=session, file_size=file_size) return r
[ "def", "upload_slice_file", "(", "self", ",", "real_file_path", ",", "slice_size", ",", "file_name", ",", "offset", "=", "0", ",", "dir_name", "=", "None", ")", ":", "if", "dir_name", "is", "not", "None", "and", "dir_name", "[", "0", "]", "==", "'/'", ...
此分片上传代码由GitHub用户a270443177(https://github.com/a270443177)友情提供 :param real_file_path: :param slice_size: :param file_name: :param offset: :param dir_name: :return:
[ "此分片上传代码由GitHub用户a270443177", "(", "https", ":", "//", "github", ".", "com", "/", "a270443177", ")", "友情提供" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L163-L191
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosBucket.upload_file_from_url
def upload_file_from_url(self, url, file_name, dir_name=None): """简单上传文件(https://www.qcloud.com/document/product/436/6066) :param url: 文件url地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串 """ real_file_name = str(int(time.time()*1000)) urllib.request.urlretrieve(url, real_file_name) data = self.upload_file(real_file_name, file_name, dir_name) os.remove(real_file_name) return data
python
def upload_file_from_url(self, url, file_name, dir_name=None): """简单上传文件(https://www.qcloud.com/document/product/436/6066) :param url: 文件url地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串 """ real_file_name = str(int(time.time()*1000)) urllib.request.urlretrieve(url, real_file_name) data = self.upload_file(real_file_name, file_name, dir_name) os.remove(real_file_name) return data
[ "def", "upload_file_from_url", "(", "self", ",", "url", ",", "file_name", ",", "dir_name", "=", "None", ")", ":", "real_file_name", "=", "str", "(", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", ")", "urllib", ".", "request", ".", "url...
简单上传文件(https://www.qcloud.com/document/product/436/6066) :param url: 文件url地址 :param file_name: 文件名称 :param dir_name: 文件夹名称(可选) :return:json数据串
[ "简单上传文件", "(", "https", ":", "//", "www", ".", "qcloud", ".", "com", "/", "document", "/", "product", "/", "436", "/", "6066", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L243-L255
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosAuth.sign_more
def sign_more(self, bucket, cos_path, expired): """多次签名(针对上传文件,创建目录, 获取文件目录属性, 拉取目录列表) :param bucket: bucket名称 :param cos_path: 要操作的cos路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串 """ return self.app_sign(bucket, cos_path, expired)
python
def sign_more(self, bucket, cos_path, expired): """多次签名(针对上传文件,创建目录, 获取文件目录属性, 拉取目录列表) :param bucket: bucket名称 :param cos_path: 要操作的cos路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串 """ return self.app_sign(bucket, cos_path, expired)
[ "def", "sign_more", "(", "self", ",", "bucket", ",", "cos_path", ",", "expired", ")", ":", "return", "self", ".", "app_sign", "(", "bucket", ",", "cos_path", ",", "expired", ")" ]
多次签名(针对上传文件,创建目录, 获取文件目录属性, 拉取目录列表) :param bucket: bucket名称 :param cos_path: 要操作的cos路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串
[ "多次签名", "(", "针对上传文件,创建目录", "获取文件目录属性", "拉取目录列表", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L297-L305
imu-hupeng/cos-python3-sdk
cos_lib3/cos.py
CosAuth.sign_download
def sign_download(self, bucket, cos_path, expired): """下载签名(用于获取后拼接成下载链接,下载私有bucket的文件) :param bucket: bucket名称 :param cos_path: 要下载的cos文件路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串 """ return self.app_sign(bucket, cos_path, expired, False)
python
def sign_download(self, bucket, cos_path, expired): """下载签名(用于获取后拼接成下载链接,下载私有bucket的文件) :param bucket: bucket名称 :param cos_path: 要下载的cos文件路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串 """ return self.app_sign(bucket, cos_path, expired, False)
[ "def", "sign_download", "(", "self", ",", "bucket", ",", "cos_path", ",", "expired", ")", ":", "return", "self", ".", "app_sign", "(", "bucket", ",", "cos_path", ",", "expired", ",", "False", ")" ]
下载签名(用于获取后拼接成下载链接,下载私有bucket的文件) :param bucket: bucket名称 :param cos_path: 要下载的cos文件路径, 以'/'开始 :param expired: 签名过期时间, UNIX时间戳, 如想让签名在30秒后过期, 即可将expired设成当前时间加上30秒 :return: 签名字符串
[ "下载签名", "(", "用于获取后拼接成下载链接,下载私有bucket的文件", ")" ]
train
https://github.com/imu-hupeng/cos-python3-sdk/blob/bbe76724ebbb432cd28ba59cca7fd29108e71f4a/cos_lib3/cos.py#L307-L315
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.strained_001
def strained_001(self, target): ''' Returns an instance of ``IIIVZincBlendeStrained001``, which is a biaxial-strained III-V zinc blende binary alloy grown on a (001) surface. Parameters ---------- target : Alloy with ``a`` parameter or float Growth substrate, assumed to have a (001) surface, or out-of-plane strain, which is negative for tensile strain and positive for compressive strain. This is the strain measured by X-ray diffraction (XRD) symmetric omega-2theta scans. ''' if isinstance(target, Alloy): return IIIVZincBlendeStrained001(unstrained=self, substrate=target) else: return IIIVZincBlendeStrained001(unstrained=self, strain_out_of_plane=target)
python
def strained_001(self, target): ''' Returns an instance of ``IIIVZincBlendeStrained001``, which is a biaxial-strained III-V zinc blende binary alloy grown on a (001) surface. Parameters ---------- target : Alloy with ``a`` parameter or float Growth substrate, assumed to have a (001) surface, or out-of-plane strain, which is negative for tensile strain and positive for compressive strain. This is the strain measured by X-ray diffraction (XRD) symmetric omega-2theta scans. ''' if isinstance(target, Alloy): return IIIVZincBlendeStrained001(unstrained=self, substrate=target) else: return IIIVZincBlendeStrained001(unstrained=self, strain_out_of_plane=target)
[ "def", "strained_001", "(", "self", ",", "target", ")", ":", "if", "isinstance", "(", "target", ",", "Alloy", ")", ":", "return", "IIIVZincBlendeStrained001", "(", "unstrained", "=", "self", ",", "substrate", "=", "target", ")", "else", ":", "return", "III...
Returns an instance of ``IIIVZincBlendeStrained001``, which is a biaxial-strained III-V zinc blende binary alloy grown on a (001) surface. Parameters ---------- target : Alloy with ``a`` parameter or float Growth substrate, assumed to have a (001) surface, or out-of-plane strain, which is negative for tensile strain and positive for compressive strain. This is the strain measured by X-ray diffraction (XRD) symmetric omega-2theta scans.
[ "Returns", "an", "instance", "of", "IIIVZincBlendeStrained001", "which", "is", "a", "biaxial", "-", "strained", "III", "-", "V", "zinc", "blende", "binary", "alloy", "grown", "on", "a", "(", "001", ")", "surface", ".", "Parameters", "----------", "target", "...
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L38-L57
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.Eg
def Eg(self, **kwargs): ''' Returns the bandgap, Eg, in eV at a given temperature, T, in K (default=300.). ''' return min(self.Eg_Gamma(**kwargs), self.Eg_L(**kwargs), self.Eg_X(**kwargs))
python
def Eg(self, **kwargs): ''' Returns the bandgap, Eg, in eV at a given temperature, T, in K (default=300.). ''' return min(self.Eg_Gamma(**kwargs), self.Eg_L(**kwargs), self.Eg_X(**kwargs))
[ "def", "Eg", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "min", "(", "self", ".", "Eg_Gamma", "(", "*", "*", "kwargs", ")", ",", "self", ".", "Eg_L", "(", "*", "*", "kwargs", ")", ",", "self", ".", "Eg_X", "(", "*", "*", "kwargs...
Returns the bandgap, Eg, in eV at a given temperature, T, in K (default=300.).
[ "Returns", "the", "bandgap", "Eg", "in", "eV", "at", "a", "given", "temperature", "T", "in", "K", "(", "default", "=", "300", ".", ")", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L93-L100
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.F
def F(self, **kwargs): ''' Returns the Kane remote-band parameter, `F`, calculated from `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`. ''' Eg = self.Eg_Gamma_0(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) meff = self.meff_e_Gamma_0(**kwargs) return (1./meff-1-(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))/2
python
def F(self, **kwargs): ''' Returns the Kane remote-band parameter, `F`, calculated from `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`. ''' Eg = self.Eg_Gamma_0(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) meff = self.meff_e_Gamma_0(**kwargs) return (1./meff-1-(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))/2
[ "def", "F", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Eg", "=", "self", ".", "Eg_Gamma_0", "(", "*", "*", "kwargs", ")", "Delta_SO", "=", "self", ".", "Delta_SO", "(", "*", "*", "kwargs", ")", "Ep", "=", "self", ".", "Ep", "(", "*", "*...
Returns the Kane remote-band parameter, `F`, calculated from `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`.
[ "Returns", "the", "Kane", "remote", "-", "band", "parameter", "F", "calculated", "from", "Eg_Gamma_0", "Delta_SO", "Ep", "and", "meff_e_Gamma_0", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L132-L141
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.a
def a(self, **kwargs): ''' Returns the lattice parameter, a, in Angstroms at a given temperature, `T`, in Kelvin (default: 300 K). ''' T = kwargs.get('T', 300.) return (self.a_300K(**kwargs) + self.thermal_expansion(**kwargs) * (T - 300.))
python
def a(self, **kwargs): ''' Returns the lattice parameter, a, in Angstroms at a given temperature, `T`, in Kelvin (default: 300 K). ''' T = kwargs.get('T', 300.) return (self.a_300K(**kwargs) + self.thermal_expansion(**kwargs) * (T - 300.))
[ "def", "a", "(", "self", ",", "*", "*", "kwargs", ")", ":", "T", "=", "kwargs", ".", "get", "(", "'T'", ",", "300.", ")", "return", "(", "self", ".", "a_300K", "(", "*", "*", "kwargs", ")", "+", "self", ".", "thermal_expansion", "(", "*", "*", ...
Returns the lattice parameter, a, in Angstroms at a given temperature, `T`, in Kelvin (default: 300 K).
[ "Returns", "the", "lattice", "parameter", "a", "in", "Angstroms", "at", "a", "given", "temperature", "T", "in", "Kelvin", "(", "default", ":", "300", "K", ")", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L145-L152
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.meff_SO
def meff_SO(self, **kwargs): ''' Returns the split-off hole effective mass calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1, and then calculation of meff_SO is recommended for alloys. ''' Eg = self.Eg_Gamma(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) luttinger1 = self.luttinger1(**kwargs) return 1./(luttinger1 - (Ep*Delta_SO)/(3*Eg*(Eg+Delta_SO)))
python
def meff_SO(self, **kwargs): ''' Returns the split-off hole effective mass calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1, and then calculation of meff_SO is recommended for alloys. ''' Eg = self.Eg_Gamma(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) luttinger1 = self.luttinger1(**kwargs) return 1./(luttinger1 - (Ep*Delta_SO)/(3*Eg*(Eg+Delta_SO)))
[ "def", "meff_SO", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Eg", "=", "self", ".", "Eg_Gamma", "(", "*", "*", "kwargs", ")", "Delta_SO", "=", "self", ".", "Delta_SO", "(", "*", "*", "kwargs", ")", "Ep", "=", "self", ".", "Ep", "(", "*", ...
Returns the split-off hole effective mass calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and luttinger1, and then calculation of meff_SO is recommended for alloys.
[ "Returns", "the", "split", "-", "off", "hole", "effective", "mass", "calculated", "from", "Eg_Gamma", "(", "T", ")", "Delta_SO", "Ep", "and", "F", ".", "Interpolation", "of", "Eg_Gamma", "(", "T", ")", "Delta_SO", "Ep", "and", "luttinger1", "and", "then", ...
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L168-L180
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.meff_e_Gamma
def meff_e_Gamma(self, **kwargs): ''' Returns the electron effective mass in the Gamma-valley calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and F, and then calculation of meff_e_Gamma is recommended for alloys. ''' Eg = self.Eg_Gamma(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) F = self.F(**kwargs) return 1./((1.+2.*F)+(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))
python
def meff_e_Gamma(self, **kwargs): ''' Returns the electron effective mass in the Gamma-valley calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and F, and then calculation of meff_e_Gamma is recommended for alloys. ''' Eg = self.Eg_Gamma(**kwargs) Delta_SO = self.Delta_SO(**kwargs) Ep = self.Ep(**kwargs) F = self.F(**kwargs) return 1./((1.+2.*F)+(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))
[ "def", "meff_e_Gamma", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Eg", "=", "self", ".", "Eg_Gamma", "(", "*", "*", "kwargs", ")", "Delta_SO", "=", "self", ".", "Delta_SO", "(", "*", "*", "kwargs", ")", "Ep", "=", "self", ".", "Ep", "(", ...
Returns the electron effective mass in the Gamma-valley calculated from Eg_Gamma(T), Delta_SO, Ep and F. Interpolation of Eg_Gamma(T), Delta_SO, Ep and F, and then calculation of meff_e_Gamma is recommended for alloys.
[ "Returns", "the", "electron", "effective", "mass", "in", "the", "Gamma", "-", "valley", "calculated", "from", "Eg_Gamma", "(", "T", ")", "Delta_SO", "Ep", "and", "F", ".", "Interpolation", "of", "Eg_Gamma", "(", "T", ")", "Delta_SO", "Ep", "and", "F", "a...
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L184-L196
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.meff_hh_110
def meff_hh_110(self, **kwargs): ''' Returns the heavy-hole band effective mass in the [110] direction, meff_hh_110, in units of electron mass. ''' return 2. / (2 * self.luttinger1(**kwargs) - self.luttinger2(**kwargs) - 3 * self.luttinger3(**kwargs))
python
def meff_hh_110(self, **kwargs): ''' Returns the heavy-hole band effective mass in the [110] direction, meff_hh_110, in units of electron mass. ''' return 2. / (2 * self.luttinger1(**kwargs) - self.luttinger2(**kwargs) - 3 * self.luttinger3(**kwargs))
[ "def", "meff_hh_110", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "2.", "/", "(", "2", "*", "self", ".", "luttinger1", "(", "*", "*", "kwargs", ")", "-", "self", ".", "luttinger2", "(", "*", "*", "kwargs", ")", "-", "3", "*", "sel...
Returns the heavy-hole band effective mass in the [110] direction, meff_hh_110, in units of electron mass.
[ "Returns", "the", "heavy", "-", "hole", "band", "effective", "mass", "in", "the", "[", "110", "]", "direction", "meff_hh_110", "in", "units", "of", "electron", "mass", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L222-L228
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.meff_lh_110
def meff_lh_110(self, **kwargs): ''' Returns the light-hole band effective mass in the [110] direction, meff_lh_110, in units of electron mass. ''' return 2. / (2 * self.luttinger1(**kwargs) + self.luttinger2(**kwargs) + 3 * self.luttinger3(**kwargs))
python
def meff_lh_110(self, **kwargs): ''' Returns the light-hole band effective mass in the [110] direction, meff_lh_110, in units of electron mass. ''' return 2. / (2 * self.luttinger1(**kwargs) + self.luttinger2(**kwargs) + 3 * self.luttinger3(**kwargs))
[ "def", "meff_lh_110", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "2.", "/", "(", "2", "*", "self", ".", "luttinger1", "(", "*", "*", "kwargs", ")", "+", "self", ".", "luttinger2", "(", "*", "*", "kwargs", ")", "+", "3", "*", "sel...
Returns the light-hole band effective mass in the [110] direction, meff_lh_110, in units of electron mass.
[ "Returns", "the", "light", "-", "hole", "band", "effective", "mass", "in", "the", "[", "110", "]", "direction", "meff_lh_110", "in", "units", "of", "electron", "mass", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L248-L254
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_alloy.py
IIIVZincBlendeAlloy.nonparabolicity
def nonparabolicity(self, **kwargs): ''' Returns the Kane band nonparabolicity parameter for the Gamma-valley. ''' Eg = self.Eg_Gamma(**kwargs) meff = self.meff_e_Gamma(**kwargs) T = kwargs.get('T', 300.) return k*T/Eg * (1 - meff)**2
python
def nonparabolicity(self, **kwargs): ''' Returns the Kane band nonparabolicity parameter for the Gamma-valley. ''' Eg = self.Eg_Gamma(**kwargs) meff = self.meff_e_Gamma(**kwargs) T = kwargs.get('T', 300.) return k*T/Eg * (1 - meff)**2
[ "def", "nonparabolicity", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Eg", "=", "self", ".", "Eg_Gamma", "(", "*", "*", "kwargs", ")", "meff", "=", "self", ".", "meff_e_Gamma", "(", "*", "*", "kwargs", ")", "T", "=", "kwargs", ".", "get", "(...
Returns the Kane band nonparabolicity parameter for the Gamma-valley.
[ "Returns", "the", "Kane", "band", "nonparabolicity", "parameter", "for", "the", "Gamma", "-", "valley", "." ]
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_alloy.py#L266-L273
castelao/oceansdb
oceansdb/etopo.py
ETOPO_var_nc.crop
def crop(self, lat, lon, var): """ Crop a subset of the dataset for each var Given doy, depth, lat and lon, it returns the smallest subset that still contains the requested coordinates inside it. It handels special cases like a region around greenwich and the international date line. Accepts 0 to 360 and -180 to 180 longitude reference. It extends time and longitude coordinates, so simplify the use of series. For example, a ship track can be requested with a longitude sequence like [352, 358, 364, 369, 380]. """ dims, idx = cropIndices(self.dims, lat, lon) subset = {} for v in var: subset = {v: self.ncs[0][v][idx['yn'], idx['xn']]} return subset, dims
python
def crop(self, lat, lon, var): """ Crop a subset of the dataset for each var Given doy, depth, lat and lon, it returns the smallest subset that still contains the requested coordinates inside it. It handels special cases like a region around greenwich and the international date line. Accepts 0 to 360 and -180 to 180 longitude reference. It extends time and longitude coordinates, so simplify the use of series. For example, a ship track can be requested with a longitude sequence like [352, 358, 364, 369, 380]. """ dims, idx = cropIndices(self.dims, lat, lon) subset = {} for v in var: subset = {v: self.ncs[0][v][idx['yn'], idx['xn']]} return subset, dims
[ "def", "crop", "(", "self", ",", "lat", ",", "lon", ",", "var", ")", ":", "dims", ",", "idx", "=", "cropIndices", "(", "self", ".", "dims", ",", "lat", ",", "lon", ")", "subset", "=", "{", "}", "for", "v", "in", "var", ":", "subset", "=", "{"...
Crop a subset of the dataset for each var Given doy, depth, lat and lon, it returns the smallest subset that still contains the requested coordinates inside it. It handels special cases like a region around greenwich and the international date line. Accepts 0 to 360 and -180 to 180 longitude reference. It extends time and longitude coordinates, so simplify the use of series. For example, a ship track can be requested with a longitude sequence like [352, 358, 364, 369, 380].
[ "Crop", "a", "subset", "of", "the", "dataset", "for", "each", "var" ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/etopo.py#L93-L112
castelao/oceansdb
oceansdb/etopo.py
ETOPO_var_nc.interpolate
def interpolate(self, lat, lon, var): """ Interpolate each var on the coordinates requested """ subset, dims = self.crop(lat, lon, var) if np.all([y in dims['lat'] for y in lat]) & \ np.all([x in dims['lon'] for x in lon]): yn = np.nonzero([y in lat for y in dims['lat']])[0] xn = np.nonzero([x in lon for x in dims['lon']])[0] output = {} for v in subset: # output[v] = subset[v][dn, zn, yn, xn] # Seriously that this is the way to do it?!!?? output[v] = subset[v][:, xn][yn] return output # The output coordinates shall be created only once. points_out = [] for latn in lat: for lonn in lon: points_out.append([latn, lonn]) points_out = np.array(points_out) output = {} for v in var: output[v] = ma.masked_all( (lat.size, lon.size), dtype=subset[v].dtype) # The valid data idx = np.nonzero(~ma.getmaskarray(subset[v])) if idx[0].size > 0: points = np.array([ dims['lat'][idx[0]], dims['lon'][idx[1]]]).T values = subset[v][idx] # Interpolate along the dimensions that have more than one # position, otherwise it means that the output is exactly # on that coordinate. ind = np.array( [np.unique(points[:, i]).size > 1 for i in range(points.shape[1])]) assert ind.any() values_out = griddata( np.atleast_1d(np.squeeze(points[:, ind])), values, np.atleast_1d(np.squeeze(points_out[:, ind])) ) # Remap the interpolated value back into a 4D array idx = np.isfinite(values_out) for [y, x], out in zip(points_out[idx], values_out[idx]): output[v][y==lat, x==lon] = out return output
python
def interpolate(self, lat, lon, var): """ Interpolate each var on the coordinates requested """ subset, dims = self.crop(lat, lon, var) if np.all([y in dims['lat'] for y in lat]) & \ np.all([x in dims['lon'] for x in lon]): yn = np.nonzero([y in lat for y in dims['lat']])[0] xn = np.nonzero([x in lon for x in dims['lon']])[0] output = {} for v in subset: # output[v] = subset[v][dn, zn, yn, xn] # Seriously that this is the way to do it?!!?? output[v] = subset[v][:, xn][yn] return output # The output coordinates shall be created only once. points_out = [] for latn in lat: for lonn in lon: points_out.append([latn, lonn]) points_out = np.array(points_out) output = {} for v in var: output[v] = ma.masked_all( (lat.size, lon.size), dtype=subset[v].dtype) # The valid data idx = np.nonzero(~ma.getmaskarray(subset[v])) if idx[0].size > 0: points = np.array([ dims['lat'][idx[0]], dims['lon'][idx[1]]]).T values = subset[v][idx] # Interpolate along the dimensions that have more than one # position, otherwise it means that the output is exactly # on that coordinate. ind = np.array( [np.unique(points[:, i]).size > 1 for i in range(points.shape[1])]) assert ind.any() values_out = griddata( np.atleast_1d(np.squeeze(points[:, ind])), values, np.atleast_1d(np.squeeze(points_out[:, ind])) ) # Remap the interpolated value back into a 4D array idx = np.isfinite(values_out) for [y, x], out in zip(points_out[idx], values_out[idx]): output[v][y==lat, x==lon] = out return output
[ "def", "interpolate", "(", "self", ",", "lat", ",", "lon", ",", "var", ")", ":", "subset", ",", "dims", "=", "self", ".", "crop", "(", "lat", ",", "lon", ",", "var", ")", "if", "np", ".", "all", "(", "[", "y", "in", "dims", "[", "'lat'", "]",...
Interpolate each var on the coordinates requested
[ "Interpolate", "each", "var", "on", "the", "coordinates", "requested" ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/etopo.py#L129-L187
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph._setup_converter_graph
def _setup_converter_graph(self, converter_list, prune_converters): ''' Set up directed conversion graph, pruning unavailable converters as necessary ''' for converter in converter_list: if prune_converters: try: converter.configure() except ConverterUnavailable as e: log.warning('%s unavailable: %s' % (converter.__class__.__name__, str(e))) continue for in_ in converter.inputs: for out in converter.outputs: self.dgraph.add_edge(in_, out, converter.cost) self.converters[(in_, out)] = converter if hasattr(converter, 'direct_outputs'): self._setup_direct_converter(converter)
python
def _setup_converter_graph(self, converter_list, prune_converters): ''' Set up directed conversion graph, pruning unavailable converters as necessary ''' for converter in converter_list: if prune_converters: try: converter.configure() except ConverterUnavailable as e: log.warning('%s unavailable: %s' % (converter.__class__.__name__, str(e))) continue for in_ in converter.inputs: for out in converter.outputs: self.dgraph.add_edge(in_, out, converter.cost) self.converters[(in_, out)] = converter if hasattr(converter, 'direct_outputs'): self._setup_direct_converter(converter)
[ "def", "_setup_converter_graph", "(", "self", ",", "converter_list", ",", "prune_converters", ")", ":", "for", "converter", "in", "converter_list", ":", "if", "prune_converters", ":", "try", ":", "converter", ".", "configure", "(", ")", "except", "ConverterUnavail...
Set up directed conversion graph, pruning unavailable converters as necessary
[ "Set", "up", "directed", "conversion", "graph", "pruning", "unavailable", "converters", "as", "necessary" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L38-L58
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph._setup_preferred_paths
def _setup_preferred_paths(self, preferred_conversion_paths): ''' Add given valid preferred conversion paths ''' for path in preferred_conversion_paths: for pair in pair_looper(path): if pair not in self.converters: log.warning('Invalid conversion path %s, unknown step %s' % (repr(path), repr(pair))) break else: # If it did not break, then add to dgraph self.dgraph.add_preferred_path(*path)
python
def _setup_preferred_paths(self, preferred_conversion_paths): ''' Add given valid preferred conversion paths ''' for path in preferred_conversion_paths: for pair in pair_looper(path): if pair not in self.converters: log.warning('Invalid conversion path %s, unknown step %s' % (repr(path), repr(pair))) break else: # If it did not break, then add to dgraph self.dgraph.add_preferred_path(*path)
[ "def", "_setup_preferred_paths", "(", "self", ",", "preferred_conversion_paths", ")", ":", "for", "path", "in", "preferred_conversion_paths", ":", "for", "pair", "in", "pair_looper", "(", "path", ")", ":", "if", "pair", "not", "in", "self", ".", "converters", ...
Add given valid preferred conversion paths
[ "Add", "given", "valid", "preferred", "conversion", "paths" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L60-L72
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph._setup_profiles
def _setup_profiles(self, conversion_profiles): ''' Add given conversion profiles checking for invalid profiles ''' # Check for invalid profiles for key, path in conversion_profiles.items(): if isinstance(path, str): path = (path, ) for left, right in pair_looper(path): pair = (_format(left), _format(right)) if pair not in self.converters: msg = 'Invalid conversion profile %s, unknown step %s' log.warning(msg % (repr(key), repr(pair))) break else: # If it did not break, then add to conversion profiles self.conversion_profiles[key] = path
python
def _setup_profiles(self, conversion_profiles): ''' Add given conversion profiles checking for invalid profiles ''' # Check for invalid profiles for key, path in conversion_profiles.items(): if isinstance(path, str): path = (path, ) for left, right in pair_looper(path): pair = (_format(left), _format(right)) if pair not in self.converters: msg = 'Invalid conversion profile %s, unknown step %s' log.warning(msg % (repr(key), repr(pair))) break else: # If it did not break, then add to conversion profiles self.conversion_profiles[key] = path
[ "def", "_setup_profiles", "(", "self", ",", "conversion_profiles", ")", ":", "# Check for invalid profiles", "for", "key", ",", "path", "in", "conversion_profiles", ".", "items", "(", ")", ":", "if", "isinstance", "(", "path", ",", "str", ")", ":", "path", "...
Add given conversion profiles checking for invalid profiles
[ "Add", "given", "conversion", "profiles", "checking", "for", "invalid", "profiles" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L74-L90
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph._setup_direct_converter
def _setup_direct_converter(self, converter): ''' Given a converter, set up the direct_output routes for conversions, which is used for transcoding between similar datatypes. ''' inputs = ( converter.direct_inputs if hasattr(converter, 'direct_inputs') else converter.inputs ) for in_ in inputs: for out in converter.direct_outputs: self.direct_converters[(in_, out)] = converter
python
def _setup_direct_converter(self, converter): ''' Given a converter, set up the direct_output routes for conversions, which is used for transcoding between similar datatypes. ''' inputs = ( converter.direct_inputs if hasattr(converter, 'direct_inputs') else converter.inputs ) for in_ in inputs: for out in converter.direct_outputs: self.direct_converters[(in_, out)] = converter
[ "def", "_setup_direct_converter", "(", "self", ",", "converter", ")", ":", "inputs", "=", "(", "converter", ".", "direct_inputs", "if", "hasattr", "(", "converter", ",", "'direct_inputs'", ")", "else", "converter", ".", "inputs", ")", "for", "in_", "in", "in...
Given a converter, set up the direct_output routes for conversions, which is used for transcoding between similar datatypes.
[ "Given", "a", "converter", "set", "up", "the", "direct_output", "routes", "for", "conversions", "which", "is", "used", "for", "transcoding", "between", "similar", "datatypes", "." ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L92-L104
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph.find_path
def find_path(self, in_, out): ''' Given an input and output TypeString, produce a graph traversal, keeping in mind special options like Conversion Profiles, Preferred Paths, and Direct Conversions. ''' if in_.arguments: raise ValueError('Cannot originate path in argumented TypeString') # Determine conversion profile. This is either simply the output, OR, # if a custom profile has been specified for this output, that custom # path or type is used. profile = self.conversion_profiles.get(str(out), str(out)) if isinstance(profile, str): profile = (profile, ) types_by_format = {_format(s): TypeString(s) for s in profile} # Normalize input and output types to string in_str = str(in_) out_str = _format(profile[0]) # First check for direct conversions, returning immediately if found direct_converter = self.direct_converters.get((in_str, out_str)) if direct_converter: out_ts = types_by_format.get(out_str, TypeString(out_str)) return [(direct_converter, TypeString(in_str), out_ts)] # No direct conversions was found, so find path through graph. # If profile was plural, add in extra steps. path = self.dgraph.shortest_path(in_str, out_str) path += profile[1:] # Loop through each edge traversal, adding converters and type # string pairs as we go along. This is to ensure conversion # profiles that have arguments mid-profile get included. results = [] for left, right in pair_looper(path): converter = self.converters.get((_format(left), _format(right))) right_typestring = types_by_format.get(right, TypeString(right)) results.append((converter, TypeString(left), right_typestring)) return results
python
def find_path(self, in_, out): ''' Given an input and output TypeString, produce a graph traversal, keeping in mind special options like Conversion Profiles, Preferred Paths, and Direct Conversions. ''' if in_.arguments: raise ValueError('Cannot originate path in argumented TypeString') # Determine conversion profile. This is either simply the output, OR, # if a custom profile has been specified for this output, that custom # path or type is used. profile = self.conversion_profiles.get(str(out), str(out)) if isinstance(profile, str): profile = (profile, ) types_by_format = {_format(s): TypeString(s) for s in profile} # Normalize input and output types to string in_str = str(in_) out_str = _format(profile[0]) # First check for direct conversions, returning immediately if found direct_converter = self.direct_converters.get((in_str, out_str)) if direct_converter: out_ts = types_by_format.get(out_str, TypeString(out_str)) return [(direct_converter, TypeString(in_str), out_ts)] # No direct conversions was found, so find path through graph. # If profile was plural, add in extra steps. path = self.dgraph.shortest_path(in_str, out_str) path += profile[1:] # Loop through each edge traversal, adding converters and type # string pairs as we go along. This is to ensure conversion # profiles that have arguments mid-profile get included. results = [] for left, right in pair_looper(path): converter = self.converters.get((_format(left), _format(right))) right_typestring = types_by_format.get(right, TypeString(right)) results.append((converter, TypeString(left), right_typestring)) return results
[ "def", "find_path", "(", "self", ",", "in_", ",", "out", ")", ":", "if", "in_", ".", "arguments", ":", "raise", "ValueError", "(", "'Cannot originate path in argumented TypeString'", ")", "# Determine conversion profile. This is either simply the output, OR,", "# if a custo...
Given an input and output TypeString, produce a graph traversal, keeping in mind special options like Conversion Profiles, Preferred Paths, and Direct Conversions.
[ "Given", "an", "input", "and", "output", "TypeString", "produce", "a", "graph", "traversal", "keeping", "in", "mind", "special", "options", "like", "Conversion", "Profiles", "Preferred", "Paths", "and", "Direct", "Conversions", "." ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L106-L146
michaelpb/omnic
omnic/conversion/graph.py
ConverterGraph.find_path_with_profiles
def find_path_with_profiles(self, conversion_profiles, in_, out): ''' Like find_path, except forces the conversion profiles to be the given conversion profile setting. Useful for "temporarily overriding" the global conversion profiles with your own. ''' original_profiles = dict(self.conversion_profiles) self._setup_profiles(conversion_profiles) results = self.find_path(in_, out) self.conversion_profiles = original_profiles return results
python
def find_path_with_profiles(self, conversion_profiles, in_, out): ''' Like find_path, except forces the conversion profiles to be the given conversion profile setting. Useful for "temporarily overriding" the global conversion profiles with your own. ''' original_profiles = dict(self.conversion_profiles) self._setup_profiles(conversion_profiles) results = self.find_path(in_, out) self.conversion_profiles = original_profiles return results
[ "def", "find_path_with_profiles", "(", "self", ",", "conversion_profiles", ",", "in_", ",", "out", ")", ":", "original_profiles", "=", "dict", "(", "self", ".", "conversion_profiles", ")", "self", ".", "_setup_profiles", "(", "conversion_profiles", ")", "results",...
Like find_path, except forces the conversion profiles to be the given conversion profile setting. Useful for "temporarily overriding" the global conversion profiles with your own.
[ "Like", "find_path", "except", "forces", "the", "conversion", "profiles", "to", "be", "the", "given", "conversion", "profile", "setting", ".", "Useful", "for", "temporarily", "overriding", "the", "global", "conversion", "profiles", "with", "your", "own", "." ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L148-L158
dailymuse/oz
oz/error_pages/__init__.py
get_lines_from_file
def get_lines_from_file(filename, lineno, context_lines): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). """ def get_lines(start, end): return [linecache.getline(filename, l).rstrip() for l in range(start, end)] lower_bound = max(1, lineno - context_lines) upper_bound = lineno + context_lines linecache.checkcache(filename) pre_context = get_lines(lower_bound, lineno) context_line = linecache.getline(filename, lineno).rstrip() post_context = get_lines(lineno + 1, upper_bound) return lower_bound, pre_context, context_line, post_context
python
def get_lines_from_file(filename, lineno, context_lines): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). """ def get_lines(start, end): return [linecache.getline(filename, l).rstrip() for l in range(start, end)] lower_bound = max(1, lineno - context_lines) upper_bound = lineno + context_lines linecache.checkcache(filename) pre_context = get_lines(lower_bound, lineno) context_line = linecache.getline(filename, lineno).rstrip() post_context = get_lines(lineno + 1, upper_bound) return lower_bound, pre_context, context_line, post_context
[ "def", "get_lines_from_file", "(", "filename", ",", "lineno", ",", "context_lines", ")", ":", "def", "get_lines", "(", "start", ",", "end", ")", ":", "return", "[", "linecache", ".", "getline", "(", "filename", ",", "l", ")", ".", "rstrip", "(", ")", "...
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
[ "Returns", "context_lines", "before", "and", "after", "lineno", "from", "file", ".", "Returns", "(", "pre_context_lineno", "pre_context", "context_line", "post_context", ")", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/error_pages/__init__.py#L37-L52
dailymuse/oz
oz/error_pages/__init__.py
get_frames
def get_frames(tback, is_breakpoint): """Builds a list of ErrorFrame objects from a traceback""" frames = [] while tback is not None: if tback.tb_next is None and is_breakpoint: break filename = tback.tb_frame.f_code.co_filename function = tback.tb_frame.f_code.co_name context = tback.tb_frame.f_locals lineno = tback.tb_lineno - 1 tback_id = id(tback) pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7) frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno)) tback = tback.tb_next return frames
python
def get_frames(tback, is_breakpoint): """Builds a list of ErrorFrame objects from a traceback""" frames = [] while tback is not None: if tback.tb_next is None and is_breakpoint: break filename = tback.tb_frame.f_code.co_filename function = tback.tb_frame.f_code.co_name context = tback.tb_frame.f_locals lineno = tback.tb_lineno - 1 tback_id = id(tback) pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7) frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno)) tback = tback.tb_next return frames
[ "def", "get_frames", "(", "tback", ",", "is_breakpoint", ")", ":", "frames", "=", "[", "]", "while", "tback", "is", "not", "None", ":", "if", "tback", ".", "tb_next", "is", "None", "and", "is_breakpoint", ":", "break", "filename", "=", "tback", ".", "t...
Builds a list of ErrorFrame objects from a traceback
[ "Builds", "a", "list", "of", "ErrorFrame", "objects", "from", "a", "traceback" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/error_pages/__init__.py#L54-L72
dailymuse/oz
oz/error_pages/__init__.py
prettify_object
def prettify_object(obj): """Makes a pretty string for an object for nice output""" try: return pprint.pformat(str(obj)) except UnicodeDecodeError as e: raise except Exception as e: return "[could not display: <%s: %s>]" % (e.__class__.__name__, str(e))
python
def prettify_object(obj): """Makes a pretty string for an object for nice output""" try: return pprint.pformat(str(obj)) except UnicodeDecodeError as e: raise except Exception as e: return "[could not display: <%s: %s>]" % (e.__class__.__name__, str(e))
[ "def", "prettify_object", "(", "obj", ")", ":", "try", ":", "return", "pprint", ".", "pformat", "(", "str", "(", "obj", ")", ")", "except", "UnicodeDecodeError", "as", "e", ":", "raise", "except", "Exception", "as", "e", ":", "return", "\"[could not displa...
Makes a pretty string for an object for nice output
[ "Makes", "a", "pretty", "string", "for", "an", "object", "for", "nice", "output" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/error_pages/__init__.py#L74-L82
natea/django-deployer
django_deployer/utils.py
clone_git_repo
def clone_git_repo(repo_url): """ input: repo_url output: path of the cloned repository steps: 1. clone the repo 2. parse 'site' into for templating assumptions: repo_url = "git@github.com:littleq0903/django-deployer-template-openshift-experiment.git" repo_local_location = "/tmp/djangodeployer-cache-xxxx" # xxxx here will be some short uuid for identify different downloads """ REPO_PREFIX = "djangodeployer-cache-" REPO_POSTFIX_UUID = str(uuid.uuid4()).split('-')[-1] REPO_CACHE_NAME = REPO_PREFIX + REPO_POSTFIX_UUID REPO_CACHE_LOCATION = '/tmp/%s' % REPO_CACHE_NAME repo = git.Repo.clone_from(repo_url, REPO_CACHE_LOCATION) return REPO_CACHE_LOCATION
python
def clone_git_repo(repo_url): """ input: repo_url output: path of the cloned repository steps: 1. clone the repo 2. parse 'site' into for templating assumptions: repo_url = "git@github.com:littleq0903/django-deployer-template-openshift-experiment.git" repo_local_location = "/tmp/djangodeployer-cache-xxxx" # xxxx here will be some short uuid for identify different downloads """ REPO_PREFIX = "djangodeployer-cache-" REPO_POSTFIX_UUID = str(uuid.uuid4()).split('-')[-1] REPO_CACHE_NAME = REPO_PREFIX + REPO_POSTFIX_UUID REPO_CACHE_LOCATION = '/tmp/%s' % REPO_CACHE_NAME repo = git.Repo.clone_from(repo_url, REPO_CACHE_LOCATION) return REPO_CACHE_LOCATION
[ "def", "clone_git_repo", "(", "repo_url", ")", ":", "REPO_PREFIX", "=", "\"djangodeployer-cache-\"", "REPO_POSTFIX_UUID", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", ".", "split", "(", "'-'", ")", "[", "-", "1", "]", "REPO_CACHE_NAME", "=", "REPO...
input: repo_url output: path of the cloned repository steps: 1. clone the repo 2. parse 'site' into for templating assumptions: repo_url = "git@github.com:littleq0903/django-deployer-template-openshift-experiment.git" repo_local_location = "/tmp/djangodeployer-cache-xxxx" # xxxx here will be some short uuid for identify different downloads
[ "input", ":", "repo_url", "output", ":", "path", "of", "the", "cloned", "repository", "steps", ":", "1", ".", "clone", "the", "repo", "2", ".", "parse", "site", "into", "for", "templating" ]
train
https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/utils.py#L6-L24
natea/django-deployer
django_deployer/utils.py
get_template_filelist
def get_template_filelist(repo_path, ignore_files=[], ignore_folders=[]): """ input: local repo path output: path list of files which need to be rendered """ default_ignore_files = ['.gitignore'] default_ignore_folders = ['.git'] ignore_files += default_ignore_files ignore_folders += default_ignore_folders filelist = [] for root, folders, files in os.walk(repo_path): for ignore_file in ignore_files: if ignore_file in files: files.remove(ignore_file) for ignore_folder in ignore_folders: if ignore_folder in folders: folders.remove(ignore_folder) for file_name in files: filelist.append( '%s/%s' % (root, file_name)) return filelist
python
def get_template_filelist(repo_path, ignore_files=[], ignore_folders=[]): """ input: local repo path output: path list of files which need to be rendered """ default_ignore_files = ['.gitignore'] default_ignore_folders = ['.git'] ignore_files += default_ignore_files ignore_folders += default_ignore_folders filelist = [] for root, folders, files in os.walk(repo_path): for ignore_file in ignore_files: if ignore_file in files: files.remove(ignore_file) for ignore_folder in ignore_folders: if ignore_folder in folders: folders.remove(ignore_folder) for file_name in files: filelist.append( '%s/%s' % (root, file_name)) return filelist
[ "def", "get_template_filelist", "(", "repo_path", ",", "ignore_files", "=", "[", "]", ",", "ignore_folders", "=", "[", "]", ")", ":", "default_ignore_files", "=", "[", "'.gitignore'", "]", "default_ignore_folders", "=", "[", "'.git'", "]", "ignore_files", "+=", ...
input: local repo path output: path list of files which need to be rendered
[ "input", ":", "local", "repo", "path", "output", ":", "path", "list", "of", "files", "which", "need", "to", "be", "rendered" ]
train
https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/utils.py#L26-L52
natea/django-deployer
django_deployer/utils.py
render_from_repo
def render_from_repo(repo_path, to_path, template_params, settings_dir): """ rendering all files into the target directory """ TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME = 'deployer_project' repo_path = repo_path.rstrip('/') to_path = to_path.rstrip('/') files_to_render = get_template_filelist(repo_path, ignore_folders=[TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME]) # rendering generic deploy files for single_file_path in files_to_render: source_file_path = single_file_path dest_file_path = source_file_path.replace(repo_path, to_path) render_from_single_file(source_file_path, dest_file_path, template_params) settings_template_dir = os.path.join(repo_path, TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME) settings_files = get_template_filelist(settings_template_dir) # rendering settings file for single_file_path in settings_files: source = single_file_path dest = single_file_path.replace(settings_template_dir, settings_dir) render_from_single_file(source, dest, template_params)
python
def render_from_repo(repo_path, to_path, template_params, settings_dir): """ rendering all files into the target directory """ TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME = 'deployer_project' repo_path = repo_path.rstrip('/') to_path = to_path.rstrip('/') files_to_render = get_template_filelist(repo_path, ignore_folders=[TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME]) # rendering generic deploy files for single_file_path in files_to_render: source_file_path = single_file_path dest_file_path = source_file_path.replace(repo_path, to_path) render_from_single_file(source_file_path, dest_file_path, template_params) settings_template_dir = os.path.join(repo_path, TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME) settings_files = get_template_filelist(settings_template_dir) # rendering settings file for single_file_path in settings_files: source = single_file_path dest = single_file_path.replace(settings_template_dir, settings_dir) render_from_single_file(source, dest, template_params)
[ "def", "render_from_repo", "(", "repo_path", ",", "to_path", ",", "template_params", ",", "settings_dir", ")", ":", "TEMPLATE_PROJECT_FOLDER_PLACEHOLDER_NAME", "=", "'deployer_project'", "repo_path", "=", "repo_path", ".", "rstrip", "(", "'/'", ")", "to_path", "=", ...
rendering all files into the target directory
[ "rendering", "all", "files", "into", "the", "target", "directory" ]
train
https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/utils.py#L55-L80
9b/frisbee
frisbee/utils.py
gen_logger
def gen_logger(name: str, log_level: int=logging.INFO) -> logging.Logger: """Create a logger to be used between processes. :returns: Logging instance. """ logger = logging.getLogger(name) logger.setLevel(log_level) shandler: logging.StreamHandler = logging.StreamHandler(sys.stdout) fmt: str = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():' fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s' shandler.setFormatter(logging.Formatter(fmt)) logger.addHandler(shandler) return logger
python
def gen_logger(name: str, log_level: int=logging.INFO) -> logging.Logger: """Create a logger to be used between processes. :returns: Logging instance. """ logger = logging.getLogger(name) logger.setLevel(log_level) shandler: logging.StreamHandler = logging.StreamHandler(sys.stdout) fmt: str = '\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():' fmt += '%(lineno)d %(asctime)s\033[0m| %(message)s' shandler.setFormatter(logging.Formatter(fmt)) logger.addHandler(shandler) return logger
[ "def", "gen_logger", "(", "name", ":", "str", ",", "log_level", ":", "int", "=", "logging", ".", "INFO", ")", "->", "logging", ".", "Logger", ":", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "logger", ".", "setLevel", "(", "log_level"...
Create a logger to be used between processes. :returns: Logging instance.
[ "Create", "a", "logger", "to", "be", "used", "between", "processes", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L13-L25
9b/frisbee
frisbee/utils.py
gen_headers
def gen_headers() -> Dict[str, str]: """Generate a header pairing.""" ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'] headers: Dict[str, str] = {'User-Agent': ua_list[random.randint(0, len(ua_list) - 1)]} return headers
python
def gen_headers() -> Dict[str, str]: """Generate a header pairing.""" ua_list: List[str] = ['Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'] headers: Dict[str, str] = {'User-Agent': ua_list[random.randint(0, len(ua_list) - 1)]} return headers
[ "def", "gen_headers", "(", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "ua_list", ":", "List", "[", "str", "]", "=", "[", "'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'", "]", "headers", ":", "...
Generate a header pairing.
[ "Generate", "a", "header", "pairing", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L28-L32
9b/frisbee
frisbee/utils.py
extract_emails
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]: """Grab email addresses from raw text data.""" pattern: Pattern = re.compile(r'([\w.-]+@[\w.-]+)') hits: List[str] = pattern.findall(results) if fuzzy: seed = domain.split('.')[0] emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)] else: emails: List[str] = [x.lower() for x in hits if x.endswith(domain)] return list(set(emails))
python
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]: """Grab email addresses from raw text data.""" pattern: Pattern = re.compile(r'([\w.-]+@[\w.-]+)') hits: List[str] = pattern.findall(results) if fuzzy: seed = domain.split('.')[0] emails: List[str] = [x.lower() for x in hits if x.split('@')[1].__contains__(seed)] else: emails: List[str] = [x.lower() for x in hits if x.endswith(domain)] return list(set(emails))
[ "def", "extract_emails", "(", "results", ":", "str", ",", "domain", ":", "str", ",", "fuzzy", ":", "bool", ")", "->", "List", "[", "str", "]", ":", "pattern", ":", "Pattern", "=", "re", ".", "compile", "(", "r'([\\w.-]+@[\\w.-]+)'", ")", "hits", ":", ...
Grab email addresses from raw text data.
[ "Grab", "email", "addresses", "from", "raw", "text", "data", "." ]
train
https://github.com/9b/frisbee/blob/2c958ec1d09bf5b28e6d1c867539b1a5325e6ce7/frisbee/utils.py#L45-L54
biocore/burrito-fillings
bfillings/blast.py
seqs_to_stream
def seqs_to_stream(seqs, ih): """Converts seqs into stream of FASTA records, depending on input handler. Each FASTA record will be a list of lines. """ if ih == '_input_as_multiline_string': recs = FastaFinder(seqs.split('\n')) elif ih == '_input_as_string': recs = FastaFinder(open(seqs)) elif ih == '_input_as_seqs': recs = [['>'+str(i), s] for i, s in enumerate(seqs)] elif ih == '_input_as_lines': recs = FastaFinder(seqs) else: raise TypeError, "Unknown input handler %s" % ih return recs
python
def seqs_to_stream(seqs, ih): """Converts seqs into stream of FASTA records, depending on input handler. Each FASTA record will be a list of lines. """ if ih == '_input_as_multiline_string': recs = FastaFinder(seqs.split('\n')) elif ih == '_input_as_string': recs = FastaFinder(open(seqs)) elif ih == '_input_as_seqs': recs = [['>'+str(i), s] for i, s in enumerate(seqs)] elif ih == '_input_as_lines': recs = FastaFinder(seqs) else: raise TypeError, "Unknown input handler %s" % ih return recs
[ "def", "seqs_to_stream", "(", "seqs", ",", "ih", ")", ":", "if", "ih", "==", "'_input_as_multiline_string'", ":", "recs", "=", "FastaFinder", "(", "seqs", ".", "split", "(", "'\\n'", ")", ")", "elif", "ih", "==", "'_input_as_string'", ":", "recs", "=", "...
Converts seqs into stream of FASTA records, depending on input handler. Each FASTA record will be a list of lines.
[ "Converts", "seqs", "into", "stream", "of", "FASTA", "records", "depending", "on", "input", "handler", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L601-L616
biocore/burrito-fillings
bfillings/blast.py
blast_seqs
def blast_seqs(seqs, blast_constructor, blast_db=None, blast_mat_root=None, params={}, add_seq_names=True, out_filename=None, WorkingDir=None, SuppressStderr=None, SuppressStdout=None, input_handler=None, HALT_EXEC=False ): """Blast list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle """ # set num keep if blast_db: params["-d"] = blast_db if out_filename: params["-o"] = out_filename ih = input_handler or guess_input_handler(seqs, add_seq_names) blast_app = blast_constructor( params=params, blast_mat_root=blast_mat_root, InputHandler=ih, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, HALT_EXEC=HALT_EXEC) return blast_app(seqs)
python
def blast_seqs(seqs, blast_constructor, blast_db=None, blast_mat_root=None, params={}, add_seq_names=True, out_filename=None, WorkingDir=None, SuppressStderr=None, SuppressStdout=None, input_handler=None, HALT_EXEC=False ): """Blast list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle """ # set num keep if blast_db: params["-d"] = blast_db if out_filename: params["-o"] = out_filename ih = input_handler or guess_input_handler(seqs, add_seq_names) blast_app = blast_constructor( params=params, blast_mat_root=blast_mat_root, InputHandler=ih, WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, HALT_EXEC=HALT_EXEC) return blast_app(seqs)
[ "def", "blast_seqs", "(", "seqs", ",", "blast_constructor", ",", "blast_db", "=", "None", ",", "blast_mat_root", "=", "None", ",", "params", "=", "{", "}", ",", "add_seq_names", "=", "True", ",", "out_filename", "=", "None", ",", "WorkingDir", "=", "None",...
Blast list of sequences. seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules for data are as follows. If it's s list, treat as lines, unless add_seq_names is true (in which case treat as list of seqs). If it's a string, test whether it has newlines. If it doesn't have newlines, assume it's a filename. If it does have newlines, it can't be a filename, so assume it's a multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle
[ "Blast", "list", "of", "sequences", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L619-L671
biocore/burrito-fillings
bfillings/blast.py
fasta_cmd_get_seqs
def fasta_cmd_get_seqs(acc_list, blast_db=None, is_protein=None, out_filename=None, params={}, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None): """Retrieve sequences for list of accessions """ if is_protein is None: params["-p"] = 'G' elif is_protein: params["-p"] = 'T' else: params["-p"] = 'F' if blast_db: params["-d"] = blast_db if out_filename: params["-o"] = out_filename # turn off duplicate accessions params["-a"] = "F" # create Psi-BLAST fasta_cmd = FastaCmd(params=params, InputHandler='_input_as_string', WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) # return results return fasta_cmd("\"%s\"" % ','.join(acc_list))
python
def fasta_cmd_get_seqs(acc_list, blast_db=None, is_protein=None, out_filename=None, params={}, WorkingDir=tempfile.gettempdir(), SuppressStderr=None, SuppressStdout=None): """Retrieve sequences for list of accessions """ if is_protein is None: params["-p"] = 'G' elif is_protein: params["-p"] = 'T' else: params["-p"] = 'F' if blast_db: params["-d"] = blast_db if out_filename: params["-o"] = out_filename # turn off duplicate accessions params["-a"] = "F" # create Psi-BLAST fasta_cmd = FastaCmd(params=params, InputHandler='_input_as_string', WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout) # return results return fasta_cmd("\"%s\"" % ','.join(acc_list))
[ "def", "fasta_cmd_get_seqs", "(", "acc_list", ",", "blast_db", "=", "None", ",", "is_protein", "=", "None", ",", "out_filename", "=", "None", ",", "params", "=", "{", "}", ",", "WorkingDir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "SuppressStder...
Retrieve sequences for list of accessions
[ "Retrieve", "sequences", "for", "list", "of", "accessions" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L674-L708
biocore/burrito-fillings
bfillings/blast.py
seqs_from_fastacmd
def seqs_from_fastacmd(acc_list, blast_db,is_protein=True): """Get dict of description:seq from fastacmd.""" fasta_cmd_res = fasta_cmd_get_seqs(acc_list, blast_db=blast_db, \ is_protein=is_protein) recs = FastaCmdFinder(fasta_cmd_res['StdOut']) result = {} for rec in recs: try: result[rec[0][1:].strip()] = ''.join(map(strip, rec[1:])) except IndexError: #maybe we didn't get a sequence? pass fasta_cmd_res.cleanUp() return result
python
def seqs_from_fastacmd(acc_list, blast_db,is_protein=True): """Get dict of description:seq from fastacmd.""" fasta_cmd_res = fasta_cmd_get_seqs(acc_list, blast_db=blast_db, \ is_protein=is_protein) recs = FastaCmdFinder(fasta_cmd_res['StdOut']) result = {} for rec in recs: try: result[rec[0][1:].strip()] = ''.join(map(strip, rec[1:])) except IndexError: #maybe we didn't get a sequence? pass fasta_cmd_res.cleanUp() return result
[ "def", "seqs_from_fastacmd", "(", "acc_list", ",", "blast_db", ",", "is_protein", "=", "True", ")", ":", "fasta_cmd_res", "=", "fasta_cmd_get_seqs", "(", "acc_list", ",", "blast_db", "=", "blast_db", ",", "is_protein", "=", "is_protein", ")", "recs", "=", "Fas...
Get dict of description:seq from fastacmd.
[ "Get", "dict", "of", "description", ":", "seq", "from", "fastacmd", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L716-L728
biocore/burrito-fillings
bfillings/blast.py
psiblast_n_neighbors
def psiblast_n_neighbors(seqs, n=100, blast_db=None, core_threshold=1e-50, extra_threshold=1e-10, lower_threshold=1e-6, step=100, method="two-step", blast_mat_root=None, params={}, add_seq_names=False, WorkingDir=None, SuppressStderr=None, SuppressStdout=None, input_handler=None, scorer=3, #shotgun with 3 hits needed to keep second_db=None ): """PsiBlasts sequences, stopping when n neighbors are reached. core_threshold: threshold for the core profile (default: 1e-50) extra_threshold: threshold for pulling in additional seqs (default:1e-10) lower_threshold: threshold for seqs in final round (default:1e-6) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle """ if blast_db: params["-d"] = blast_db ih = input_handler or guess_input_handler(seqs, add_seq_names) recs = seqs_to_stream(seqs, ih) #checkpointing can only handle one seq... 
#set up the parameters for the core and additional runs max_iterations = params['-j'] params['-j'] = 2 #won't checkpoint with single iteration app = PsiBlast(params=params, blast_mat_root=blast_mat_root, InputHandler='_input_as_lines', WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, ) result = {} for seq in recs: query_id = seq[0][1:].split(None,1)[0] if method == "two-step": result[query_id] = ids_from_seq_two_step(seq, n, max_iterations, \ app, core_threshold, extra_threshold, lower_threshold, second_db) elif method == "lower_threshold": result[query_id] = ids_from_seq_lower_threshold(seq, n, \ max_iterations, app, core_threshold, lower_threshold, step) elif method == "iterative": result[query_id] = ids_from_seqs_iterative(seq, app, \ QMEPsiBlast9, scorer, params['-j'], n) else: raise TypeError, "Got unknown method %s" % method params['-j'] = max_iterations return result
python
def psiblast_n_neighbors(seqs, n=100, blast_db=None, core_threshold=1e-50, extra_threshold=1e-10, lower_threshold=1e-6, step=100, method="two-step", blast_mat_root=None, params={}, add_seq_names=False, WorkingDir=None, SuppressStderr=None, SuppressStdout=None, input_handler=None, scorer=3, #shotgun with 3 hits needed to keep second_db=None ): """PsiBlasts sequences, stopping when n neighbors are reached. core_threshold: threshold for the core profile (default: 1e-50) extra_threshold: threshold for pulling in additional seqs (default:1e-10) lower_threshold: threshold for seqs in final round (default:1e-6) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle """ if blast_db: params["-d"] = blast_db ih = input_handler or guess_input_handler(seqs, add_seq_names) recs = seqs_to_stream(seqs, ih) #checkpointing can only handle one seq... 
#set up the parameters for the core and additional runs max_iterations = params['-j'] params['-j'] = 2 #won't checkpoint with single iteration app = PsiBlast(params=params, blast_mat_root=blast_mat_root, InputHandler='_input_as_lines', WorkingDir=WorkingDir, SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, ) result = {} for seq in recs: query_id = seq[0][1:].split(None,1)[0] if method == "two-step": result[query_id] = ids_from_seq_two_step(seq, n, max_iterations, \ app, core_threshold, extra_threshold, lower_threshold, second_db) elif method == "lower_threshold": result[query_id] = ids_from_seq_lower_threshold(seq, n, \ max_iterations, app, core_threshold, lower_threshold, step) elif method == "iterative": result[query_id] = ids_from_seqs_iterative(seq, app, \ QMEPsiBlast9, scorer, params['-j'], n) else: raise TypeError, "Got unknown method %s" % method params['-j'] = max_iterations return result
[ "def", "psiblast_n_neighbors", "(", "seqs", ",", "n", "=", "100", ",", "blast_db", "=", "None", ",", "core_threshold", "=", "1e-50", ",", "extra_threshold", "=", "1e-10", ",", "lower_threshold", "=", "1e-6", ",", "step", "=", "100", ",", "method", "=", "...
PsiBlasts sequences, stopping when n neighbors are reached. core_threshold: threshold for the core profile (default: 1e-50) extra_threshold: threshold for pulling in additional seqs (default:1e-10) lower_threshold: threshold for seqs in final round (default:1e-6) seqs: either file name or list of sequence objects or list of strings or single multiline string containing sequences. If you want to skip the detection and force a specific type of input handler, use input_handler='your_favorite_handler'. add_seq_names: boolean. if True, sequence names are inserted in the list of sequences. if False, it assumes seqs is a list of lines of some proper format that the program can handle
[ "PsiBlasts", "sequences", "stopping", "when", "n", "neighbors", "are", "reached", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L730-L796
biocore/burrito-fillings
bfillings/blast.py
ids_from_seq_two_step
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold, \ extra_threshold, lower_threshold, second_db=None): """Returns ids that match a seq, using a 2-tiered strategy. Optionally uses a second database for the second search. """ #first time through: reset 'h' and 'e' to core #-h is the e-value threshold for including seqs in the score matrix model app.Parameters['-h'].on(core_threshold) #-e is the e-value threshold for the final blast app.Parameters['-e'].on(core_threshold) checkpoints = [] ids = [] last_num_ids = None for i in range(max_iterations): if checkpoints: app.Parameters['-R'].on(checkpoints[-1]) curr_check = 'checkpoint_%s.chk' % i app.Parameters['-C'].on(curr_check) output = app(seq) #if we didn't write a checkpoint, bail out if not access(curr_check, F_OK): break #if we got here, we wrote a checkpoint file checkpoints.append(curr_check) result = list(output.get('BlastOut', output['StdOut'])) output.cleanUp() if result: ids = LastProteinIds9(result,keep_values=True,filter_identity=False) num_ids = len(ids) if num_ids >= n: break if num_ids == last_num_ids: break last_num_ids = num_ids #if we didn't write any checkpoints, second run won't work, so return ids if not checkpoints: return ids #if we got too many ids and don't have a second database, return the ids we got if (not second_db) and num_ids >= n: return ids #second time through: reset 'h' and 'e' to get extra hits, and switch the #database if appropriate app.Parameters['-h'].on(extra_threshold) app.Parameters['-e'].on(lower_threshold) if second_db: app.Parameters['-d'].on(second_db) for i in range(max_iterations): #will always have last_check if we get here app.Parameters['-R'].on(checkpoints[-1]) curr_check = 'checkpoint_b_%s.chk' % i app.Parameters['-C'].on(curr_check) output = app(seq) #bail out if we couldn't write a checkpoint if not access(curr_check, F_OK): break #if we got here, the checkpoint worked checkpoints.append(curr_check) result = list(output.get('BlastOut', 
output['StdOut'])) if result: ids = LastProteinIds9(result,keep_values=True,filter_identity=False) num_ids = len(ids) if num_ids >= n: break if num_ids == last_num_ids: break last_num_ids = num_ids #return the ids we got. may not be as many as we wanted. for c in checkpoints: remove(c) return ids
python
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold, \ extra_threshold, lower_threshold, second_db=None): """Returns ids that match a seq, using a 2-tiered strategy. Optionally uses a second database for the second search. """ #first time through: reset 'h' and 'e' to core #-h is the e-value threshold for including seqs in the score matrix model app.Parameters['-h'].on(core_threshold) #-e is the e-value threshold for the final blast app.Parameters['-e'].on(core_threshold) checkpoints = [] ids = [] last_num_ids = None for i in range(max_iterations): if checkpoints: app.Parameters['-R'].on(checkpoints[-1]) curr_check = 'checkpoint_%s.chk' % i app.Parameters['-C'].on(curr_check) output = app(seq) #if we didn't write a checkpoint, bail out if not access(curr_check, F_OK): break #if we got here, we wrote a checkpoint file checkpoints.append(curr_check) result = list(output.get('BlastOut', output['StdOut'])) output.cleanUp() if result: ids = LastProteinIds9(result,keep_values=True,filter_identity=False) num_ids = len(ids) if num_ids >= n: break if num_ids == last_num_ids: break last_num_ids = num_ids #if we didn't write any checkpoints, second run won't work, so return ids if not checkpoints: return ids #if we got too many ids and don't have a second database, return the ids we got if (not second_db) and num_ids >= n: return ids #second time through: reset 'h' and 'e' to get extra hits, and switch the #database if appropriate app.Parameters['-h'].on(extra_threshold) app.Parameters['-e'].on(lower_threshold) if second_db: app.Parameters['-d'].on(second_db) for i in range(max_iterations): #will always have last_check if we get here app.Parameters['-R'].on(checkpoints[-1]) curr_check = 'checkpoint_b_%s.chk' % i app.Parameters['-C'].on(curr_check) output = app(seq) #bail out if we couldn't write a checkpoint if not access(curr_check, F_OK): break #if we got here, the checkpoint worked checkpoints.append(curr_check) result = list(output.get('BlastOut', 
output['StdOut'])) if result: ids = LastProteinIds9(result,keep_values=True,filter_identity=False) num_ids = len(ids) if num_ids >= n: break if num_ids == last_num_ids: break last_num_ids = num_ids #return the ids we got. may not be as many as we wanted. for c in checkpoints: remove(c) return ids
[ "def", "ids_from_seq_two_step", "(", "seq", ",", "n", ",", "max_iterations", ",", "app", ",", "core_threshold", ",", "extra_threshold", ",", "lower_threshold", ",", "second_db", "=", "None", ")", ":", "#first time through: reset 'h' and 'e' to core", "#-h is the e-value...
Returns ids that match a seq, using a 2-tiered strategy. Optionally uses a second database for the second search.
[ "Returns", "ids", "that", "match", "a", "seq", "using", "a", "2", "-", "tiered", "strategy", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L798-L871
biocore/burrito-fillings
bfillings/blast.py
ids_from_seq_lower_threshold
def ids_from_seq_lower_threshold(seq, n, max_iterations, app, core_threshold, \ lower_threshold, step=100): """Returns ids that match a seq, decreasing the sensitivity.""" last_num_ids = None checkpoints = [] cp_name_base = make_unique_str() # cache ides for each iteration # store { iteration_num:(core_threshold, [list of matching ids]) } all_ids = {} try: i=0 while 1: #-h is the e-value threshold for inclusion in the score matrix model app.Parameters['-h'].on(core_threshold) app.Parameters['-e'].on(core_threshold) if core_threshold > lower_threshold: raise ThresholdFound if checkpoints: #-R restarts from a previously stored file app.Parameters['-R'].on(checkpoints[-1]) #store the score model from this iteration curr_check = 'checkpoint_' + cp_name_base + '_' + str(i) + \ '.chk' app.Parameters['-C'].on(curr_check) output = app(seq) result = list(output.get('BlastOut', output['StdOut'])) #sometimes fails on first try -- don't know why, but this seems #to fix problem while not result: output = app(seq) result = list(output.get('BlastOut', output['StdOut'])) ids = LastProteinIds9(result,keep_values=True,filter_identity=False) output.cleanUp() all_ids[i + 1] = (core_threshold, copy(ids)) if not access(curr_check, F_OK): raise ThresholdFound checkpoints.append(curr_check) num_ids = len(ids) if num_ids >= n: raise ThresholdFound last_num_ids = num_ids core_threshold *= step if i >= max_iterations - 1: #because max_iterations is 1-based raise ThresholdFound i += 1 except ThresholdFound: for c in checkpoints: remove(c) #turn app.Parameters['-R'] off so that for the next file it does not #try and read in a checkpoint file that is not there app.Parameters['-R'].off() return ids, i + 1, all_ids
python
def ids_from_seq_lower_threshold(seq, n, max_iterations, app, core_threshold, \ lower_threshold, step=100): """Returns ids that match a seq, decreasing the sensitivity.""" last_num_ids = None checkpoints = [] cp_name_base = make_unique_str() # cache ides for each iteration # store { iteration_num:(core_threshold, [list of matching ids]) } all_ids = {} try: i=0 while 1: #-h is the e-value threshold for inclusion in the score matrix model app.Parameters['-h'].on(core_threshold) app.Parameters['-e'].on(core_threshold) if core_threshold > lower_threshold: raise ThresholdFound if checkpoints: #-R restarts from a previously stored file app.Parameters['-R'].on(checkpoints[-1]) #store the score model from this iteration curr_check = 'checkpoint_' + cp_name_base + '_' + str(i) + \ '.chk' app.Parameters['-C'].on(curr_check) output = app(seq) result = list(output.get('BlastOut', output['StdOut'])) #sometimes fails on first try -- don't know why, but this seems #to fix problem while not result: output = app(seq) result = list(output.get('BlastOut', output['StdOut'])) ids = LastProteinIds9(result,keep_values=True,filter_identity=False) output.cleanUp() all_ids[i + 1] = (core_threshold, copy(ids)) if not access(curr_check, F_OK): raise ThresholdFound checkpoints.append(curr_check) num_ids = len(ids) if num_ids >= n: raise ThresholdFound last_num_ids = num_ids core_threshold *= step if i >= max_iterations - 1: #because max_iterations is 1-based raise ThresholdFound i += 1 except ThresholdFound: for c in checkpoints: remove(c) #turn app.Parameters['-R'] off so that for the next file it does not #try and read in a checkpoint file that is not there app.Parameters['-R'].off() return ids, i + 1, all_ids
[ "def", "ids_from_seq_lower_threshold", "(", "seq", ",", "n", ",", "max_iterations", ",", "app", ",", "core_threshold", ",", "lower_threshold", ",", "step", "=", "100", ")", ":", "last_num_ids", "=", "None", "checkpoints", "=", "[", "]", "cp_name_base", "=", ...
Returns ids that match a seq, decreasing the sensitivity.
[ "Returns", "ids", "that", "match", "a", "seq", "decreasing", "the", "sensitivity", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L875-L928
biocore/burrito-fillings
bfillings/blast.py
make_unique_str
def make_unique_str(num_chars=20): """make a random string of characters for a temp filename""" chars = 'abcdefghigklmnopqrstuvwxyz' all_chars = chars + chars.upper() + '01234567890' picks = list(all_chars) return ''.join([choice(picks) for i in range(num_chars)])
python
def make_unique_str(num_chars=20): """make a random string of characters for a temp filename""" chars = 'abcdefghigklmnopqrstuvwxyz' all_chars = chars + chars.upper() + '01234567890' picks = list(all_chars) return ''.join([choice(picks) for i in range(num_chars)])
[ "def", "make_unique_str", "(", "num_chars", "=", "20", ")", ":", "chars", "=", "'abcdefghigklmnopqrstuvwxyz'", "all_chars", "=", "chars", "+", "chars", ".", "upper", "(", ")", "+", "'01234567890'", "picks", "=", "list", "(", "all_chars", ")", "return", "''",...
make a random string of characters for a temp filename
[ "make", "a", "random", "string", "of", "characters", "for", "a", "temp", "filename" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L930-L935
biocore/burrito-fillings
bfillings/blast.py
keep_everything_scorer
def keep_everything_scorer(checked_ids): """Returns every query and every match in checked_ids, with best score.""" result = checked_ids.keys() for i in checked_ids.values(): result.extend(i.keys()) return dict.fromkeys(result).keys()
python
def keep_everything_scorer(checked_ids): """Returns every query and every match in checked_ids, with best score.""" result = checked_ids.keys() for i in checked_ids.values(): result.extend(i.keys()) return dict.fromkeys(result).keys()
[ "def", "keep_everything_scorer", "(", "checked_ids", ")", ":", "result", "=", "checked_ids", ".", "keys", "(", ")", "for", "i", "in", "checked_ids", ".", "values", "(", ")", ":", "result", ".", "extend", "(", "i", ".", "keys", "(", ")", ")", "return", ...
Returns every query and every match in checked_ids, with best score.
[ "Returns", "every", "query", "and", "every", "match", "in", "checked_ids", "with", "best", "score", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L968-L973
biocore/burrito-fillings
bfillings/blast.py
ids_from_seqs_iterative
def ids_from_seqs_iterative(seqs, app, query_parser, \ scorer=keep_everything_scorer, max_iterations=None, blast_db=None,\ max_seqs=None, ): """Gets the ids from each seq, then does each additional id until all done. If scorer is passed in as an int, uses shotgun scorer with that # hits. """ if isinstance(scorer, int): scorer = make_shotgun_scorer(scorer) seqs_to_check = list(seqs) checked_ids = {} curr_iteration = 0 while seqs_to_check: unchecked_ids = {} #pass seqs to command all_output = app(seqs_to_check) output = all_output.get('BlastOut', all_output['StdOut']) for query_id, match_id, match_score in query_parser(output): if query_id not in checked_ids: checked_ids[query_id] = {} checked_ids[query_id][match_id] = match_score if match_id not in checked_ids: unchecked_ids[match_id] = True all_output.cleanUp() if unchecked_ids: seq_file = fasta_cmd_get_seqs(unchecked_ids.keys(), app.Parameters['-d'].Value)['StdOut'] seqs_to_check = [] for s in FastaCmdFinder(fasta_cmd_get_seqs(\ unchecked_ids.keys(), app.Parameters['-d'].Value)['StdOut']): seqs_to_check.extend(s) else: seqs_to_check = [] #bail out if max iterations or max seqs was defined and we've reached it curr_iteration += 1 if max_iterations and (curr_iteration >= max_iterations): break if max_seqs: curr = scorer(checked_ids) if len(curr) >= max_seqs: return curr return scorer(checked_ids)
python
def ids_from_seqs_iterative(seqs, app, query_parser, \ scorer=keep_everything_scorer, max_iterations=None, blast_db=None,\ max_seqs=None, ): """Gets the ids from each seq, then does each additional id until all done. If scorer is passed in as an int, uses shotgun scorer with that # hits. """ if isinstance(scorer, int): scorer = make_shotgun_scorer(scorer) seqs_to_check = list(seqs) checked_ids = {} curr_iteration = 0 while seqs_to_check: unchecked_ids = {} #pass seqs to command all_output = app(seqs_to_check) output = all_output.get('BlastOut', all_output['StdOut']) for query_id, match_id, match_score in query_parser(output): if query_id not in checked_ids: checked_ids[query_id] = {} checked_ids[query_id][match_id] = match_score if match_id not in checked_ids: unchecked_ids[match_id] = True all_output.cleanUp() if unchecked_ids: seq_file = fasta_cmd_get_seqs(unchecked_ids.keys(), app.Parameters['-d'].Value)['StdOut'] seqs_to_check = [] for s in FastaCmdFinder(fasta_cmd_get_seqs(\ unchecked_ids.keys(), app.Parameters['-d'].Value)['StdOut']): seqs_to_check.extend(s) else: seqs_to_check = [] #bail out if max iterations or max seqs was defined and we've reached it curr_iteration += 1 if max_iterations and (curr_iteration >= max_iterations): break if max_seqs: curr = scorer(checked_ids) if len(curr) >= max_seqs: return curr return scorer(checked_ids)
[ "def", "ids_from_seqs_iterative", "(", "seqs", ",", "app", ",", "query_parser", ",", "scorer", "=", "keep_everything_scorer", ",", "max_iterations", "=", "None", ",", "blast_db", "=", "None", ",", "max_seqs", "=", "None", ",", ")", ":", "if", "isinstance", "...
Gets the ids from each seq, then does each additional id until all done. If scorer is passed in as an int, uses shotgun scorer with that # hits.
[ "Gets", "the", "ids", "from", "each", "seq", "then", "does", "each", "additional", "id", "until", "all", "done", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L975-L1017
biocore/burrito-fillings
bfillings/blast.py
blastp
def blastp(seqs, blast_db="nr", e_value="1e-20", max_hits=200, working_dir=tempfile.gettempdir(), blast_mat_root=None, extra_params={}): """ Returns BlastResult from input seqs, using blastp. Need to add doc string """ # set up params to use with blastp params = { # matrix "-M":"BLOSUM62", # max procs "-a":"1", # expectation "-e":e_value, # max seqs to show "-b":max_hits, # max one line descriptions "-v":max_hits, # program "-p":"blastp" } params.update(extra_params) # blast blast_res = blast_seqs(seqs, Blastall, blast_mat_root=blast_mat_root, blast_db=blast_db, params=params, add_seq_names=False, WorkingDir=working_dir ) # get prot id map if blast_res['StdOut']: lines = [x for x in blast_res['StdOut']] return BlastResult(lines) return None
python
def blastp(seqs, blast_db="nr", e_value="1e-20", max_hits=200, working_dir=tempfile.gettempdir(), blast_mat_root=None, extra_params={}): """ Returns BlastResult from input seqs, using blastp. Need to add doc string """ # set up params to use with blastp params = { # matrix "-M":"BLOSUM62", # max procs "-a":"1", # expectation "-e":e_value, # max seqs to show "-b":max_hits, # max one line descriptions "-v":max_hits, # program "-p":"blastp" } params.update(extra_params) # blast blast_res = blast_seqs(seqs, Blastall, blast_mat_root=blast_mat_root, blast_db=blast_db, params=params, add_seq_names=False, WorkingDir=working_dir ) # get prot id map if blast_res['StdOut']: lines = [x for x in blast_res['StdOut']] return BlastResult(lines) return None
[ "def", "blastp", "(", "seqs", ",", "blast_db", "=", "\"nr\"", ",", "e_value", "=", "\"1e-20\"", ",", "max_hits", "=", "200", ",", "working_dir", "=", "tempfile", ".", "gettempdir", "(", ")", ",", "blast_mat_root", "=", "None", ",", "extra_params", "=", "...
Returns BlastResult from input seqs, using blastp. Need to add doc string
[ "Returns", "BlastResult", "from", "input", "seqs", "using", "blastp", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blast.py#L1020-L1066
lorien/runscript
runscript/lock.py
set_lock
def set_lock(fname): """ Try to lock file and write PID. Return the status of operation. """ global fh fh = open(fname, 'w') if os.name == 'nt': # Code for NT systems got from: http://code.activestate.com/recipes/65203/ import win32con import win32file import pywintypes LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK LOCK_SH = 0 # the default LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # is there any reason not to reuse the following structure? __overlapped = pywintypes.OVERLAPPED() hfile = win32file._get_osfhandle(fh.fileno()) try: win32file.LockFileEx(hfile, LOCK_EX | LOCK_NB, 0, -0x10000, __overlapped) except pywintypes.error as exc_value: # error: (33, 'LockFileEx', 'The process cannot access # the file because another process has locked a portion # of the file.') if exc_value[0] == 33: return False else: from fcntl import flock, LOCK_EX, LOCK_NB try: flock(fh.fileno(), LOCK_EX | LOCK_NB) except Exception as ex: return False fh.write(str(os.getpid())) fh.flush() return True
python
def set_lock(fname): """ Try to lock file and write PID. Return the status of operation. """ global fh fh = open(fname, 'w') if os.name == 'nt': # Code for NT systems got from: http://code.activestate.com/recipes/65203/ import win32con import win32file import pywintypes LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK LOCK_SH = 0 # the default LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # is there any reason not to reuse the following structure? __overlapped = pywintypes.OVERLAPPED() hfile = win32file._get_osfhandle(fh.fileno()) try: win32file.LockFileEx(hfile, LOCK_EX | LOCK_NB, 0, -0x10000, __overlapped) except pywintypes.error as exc_value: # error: (33, 'LockFileEx', 'The process cannot access # the file because another process has locked a portion # of the file.') if exc_value[0] == 33: return False else: from fcntl import flock, LOCK_EX, LOCK_NB try: flock(fh.fileno(), LOCK_EX | LOCK_NB) except Exception as ex: return False fh.write(str(os.getpid())) fh.flush() return True
[ "def", "set_lock", "(", "fname", ")", ":", "global", "fh", "fh", "=", "open", "(", "fname", ",", "'w'", ")", "if", "os", ".", "name", "==", "'nt'", ":", "# Code for NT systems got from: http://code.activestate.com/recipes/65203/", "import", "win32con", "import", ...
Try to lock file and write PID. Return the status of operation.
[ "Try", "to", "lock", "file", "and", "write", "PID", ".", "Return", "the", "status", "of", "operation", "." ]
train
https://github.com/lorien/runscript/blob/52cb9763b53235161bc446ac0206c6a25fb4cfdb/runscript/lock.py#L13-L55
lorien/runscript
runscript/lock.py
assert_lock
def assert_lock(fname): """ If file is locked then terminate program else lock file. """ if not set_lock(fname): logger.error('File {} is already locked. Terminating.'.format(fname)) sys.exit()
python
def assert_lock(fname): """ If file is locked then terminate program else lock file. """ if not set_lock(fname): logger.error('File {} is already locked. Terminating.'.format(fname)) sys.exit()
[ "def", "assert_lock", "(", "fname", ")", ":", "if", "not", "set_lock", "(", "fname", ")", ":", "logger", ".", "error", "(", "'File {} is already locked. Terminating.'", ".", "format", "(", "fname", ")", ")", "sys", ".", "exit", "(", ")" ]
If file is locked then terminate program else lock file.
[ "If", "file", "is", "locked", "then", "terminate", "program", "else", "lock", "file", "." ]
train
https://github.com/lorien/runscript/blob/52cb9763b53235161bc446ac0206c6a25fb4cfdb/runscript/lock.py#L58-L65
biocore/burrito-fillings
bfillings/formatdb.py
build_blast_db_from_fasta_path
def build_blast_db_from_fasta_path(fasta_path, is_protein=False, output_dir=None, HALT_EXEC=False): """Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ fasta_dir, fasta_filename = split(fasta_path) if not output_dir: output_dir = fasta_dir or '.' # Will cd to this directory, so just pass the filename # so the app is not confused by relative paths fasta_path = fasta_filename if not output_dir.endswith('/'): db_name = output_dir + '/' + fasta_filename else: db_name = output_dir + fasta_filename # instantiate the object fdb = FormatDb(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC) if is_protein: fdb.Parameters['-p'].on('T') else: fdb.Parameters['-p'].on('F') app_result = fdb(fasta_path) db_filepaths = [] for v in app_result.values(): try: db_filepaths.append(v.name) except AttributeError: # not a file object, so no path to return pass return db_name, db_filepaths
python
def build_blast_db_from_fasta_path(fasta_path, is_protein=False, output_dir=None, HALT_EXEC=False): """Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ fasta_dir, fasta_filename = split(fasta_path) if not output_dir: output_dir = fasta_dir or '.' # Will cd to this directory, so just pass the filename # so the app is not confused by relative paths fasta_path = fasta_filename if not output_dir.endswith('/'): db_name = output_dir + '/' + fasta_filename else: db_name = output_dir + fasta_filename # instantiate the object fdb = FormatDb(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC) if is_protein: fdb.Parameters['-p'].on('T') else: fdb.Parameters['-p'].on('F') app_result = fdb(fasta_path) db_filepaths = [] for v in app_result.values(): try: db_filepaths.append(v.name) except AttributeError: # not a file object, so no path to return pass return db_name, db_filepaths
[ "def", "build_blast_db_from_fasta_path", "(", "fasta_path", ",", "is_protein", "=", "False", ",", "output_dir", "=", "None", ",", "HALT_EXEC", "=", "False", ")", ":", "fasta_dir", ",", "fasta_filename", "=", "split", "(", "fasta_path", ")", "if", "not", "outpu...
Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging
[ "Build", "blast", "db", "from", "fasta_path", ";", "return", "db", "name", "and", "list", "of", "files", "created" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L88-L129
biocore/burrito-fillings
bfillings/formatdb.py
build_blast_db_from_fasta_file
def build_blast_db_from_fasta_file(fasta_file, is_protein=False, output_dir=None, HALT_EXEC=False): """Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ output_dir = output_dir or '.' _, fasta_path = mkstemp(dir=output_dir, prefix="BLAST_temp_db_", suffix=".fasta") fasta_f = open(fasta_path, 'w') for line in fasta_file: fasta_f.write('%s\n' % line.strip()) fasta_f.close() blast_db, db_filepaths = build_blast_db_from_fasta_path(fasta_path, is_protein=is_protein, output_dir=None, HALT_EXEC=HALT_EXEC ) db_filepaths.append(fasta_path) return blast_db, db_filepaths
python
def build_blast_db_from_fasta_file(fasta_file, is_protein=False, output_dir=None, HALT_EXEC=False): """Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ output_dir = output_dir or '.' _, fasta_path = mkstemp(dir=output_dir, prefix="BLAST_temp_db_", suffix=".fasta") fasta_f = open(fasta_path, 'w') for line in fasta_file: fasta_f.write('%s\n' % line.strip()) fasta_f.close() blast_db, db_filepaths = build_blast_db_from_fasta_path(fasta_path, is_protein=is_protein, output_dir=None, HALT_EXEC=HALT_EXEC ) db_filepaths.append(fasta_path) return blast_db, db_filepaths
[ "def", "build_blast_db_from_fasta_file", "(", "fasta_file", ",", "is_protein", "=", "False", ",", "output_dir", "=", "None", ",", "HALT_EXEC", "=", "False", ")", ":", "output_dir", "=", "output_dir", "or", "'.'", "_", ",", "fasta_path", "=", "mkstemp", "(", ...
Build blast db from fasta_path; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. fasta_path: path to fasta file of sequences to build database from is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: directory containing fasta_path) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging
[ "Build", "blast", "db", "from", "fasta_path", ";", "return", "db", "name", "and", "list", "of", "files", "created" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L132-L164
biocore/burrito-fillings
bfillings/formatdb.py
build_blast_db_from_seqs
def build_blast_db_from_seqs(seqs, is_protein=False, output_dir='./', HALT_EXEC=False): """Build blast db from seqs; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. seqs: sequence collection or alignment object is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: current directory) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ # Build a temp filepath _, tmp_fasta_filepath = mkstemp(prefix='Blast_tmp_db', suffix='.fasta') # open the temp file tmp_fasta_file = open(tmp_fasta_filepath, 'w') # write the sequence collection to file tmp_fasta_file.write(seqs.toFasta()) tmp_fasta_file.close() # build the bast database db_name, db_filepaths = build_blast_db_from_fasta_path(tmp_fasta_filepath, is_protein=is_protein, output_dir=output_dir, HALT_EXEC=HALT_EXEC) # clean-up the temporary file remove(tmp_fasta_filepath) # return the results return db_name, db_filepaths
python
def build_blast_db_from_seqs(seqs, is_protein=False, output_dir='./', HALT_EXEC=False): """Build blast db from seqs; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. seqs: sequence collection or alignment object is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: current directory) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging """ # Build a temp filepath _, tmp_fasta_filepath = mkstemp(prefix='Blast_tmp_db', suffix='.fasta') # open the temp file tmp_fasta_file = open(tmp_fasta_filepath, 'w') # write the sequence collection to file tmp_fasta_file.write(seqs.toFasta()) tmp_fasta_file.close() # build the bast database db_name, db_filepaths = build_blast_db_from_fasta_path(tmp_fasta_filepath, is_protein=is_protein, output_dir=output_dir, HALT_EXEC=HALT_EXEC) # clean-up the temporary file remove(tmp_fasta_filepath) # return the results return db_name, db_filepaths
[ "def", "build_blast_db_from_seqs", "(", "seqs", ",", "is_protein", "=", "False", ",", "output_dir", "=", "'./'", ",", "HALT_EXEC", "=", "False", ")", ":", "# Build a temp filepath", "_", ",", "tmp_fasta_filepath", "=", "mkstemp", "(", "prefix", "=", "'Blast_tmp_...
Build blast db from seqs; return db name and list of files created **If using to create temporary blast databases, you can call cogent.util.misc.remove_files(db_filepaths) to clean up all the files created by formatdb when you're done with the database. seqs: sequence collection or alignment object is_protein: True if working on protein seqs (default: False) output_dir: directory where output should be written (default: current directory) HALT_EXEC: halt just before running the formatdb command and print the command -- useful for debugging
[ "Build", "blast", "db", "from", "seqs", ";", "return", "db", "name", "and", "list", "of", "files", "created" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L167-L201
biocore/burrito-fillings
bfillings/formatdb.py
parse_command_line_parameters
def parse_command_line_parameters(): """ Parses command line arguments """ usage = 'usage: %prog [options] fasta_filepath' version = 'Version: %prog 0.1' parser = OptionParser(usage=usage, version=version) # A binary 'verbose' flag parser.add_option('-p', '--is_protein', action='store_true', dest='is_protein', default=False, help='Pass if building db of protein sequences [default:' ' False, nucleotide db]') parser.add_option('-o', '--output_dir', action='store', type='string', dest='output_dir', default=None, help='the output directory [default: directory ' 'containing input fasta_filepath]') opts, args = parser.parse_args() num_args = 1 if len(args) != num_args: parser.error('Must provide single filepath to build database from.') return opts, args
python
def parse_command_line_parameters(): """ Parses command line arguments """ usage = 'usage: %prog [options] fasta_filepath' version = 'Version: %prog 0.1' parser = OptionParser(usage=usage, version=version) # A binary 'verbose' flag parser.add_option('-p', '--is_protein', action='store_true', dest='is_protein', default=False, help='Pass if building db of protein sequences [default:' ' False, nucleotide db]') parser.add_option('-o', '--output_dir', action='store', type='string', dest='output_dir', default=None, help='the output directory [default: directory ' 'containing input fasta_filepath]') opts, args = parser.parse_args() num_args = 1 if len(args) != num_args: parser.error('Must provide single filepath to build database from.') return opts, args
[ "def", "parse_command_line_parameters", "(", ")", ":", "usage", "=", "'usage: %prog [options] fasta_filepath'", "version", "=", "'Version: %prog 0.1'", "parser", "=", "OptionParser", "(", "usage", "=", "usage", ",", "version", "=", "version", ")", "# A binary 'verbose' ...
Parses command line arguments
[ "Parses", "command", "line", "arguments" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L204-L226
biocore/burrito-fillings
bfillings/formatdb.py
FormatDb._input_as_parameter
def _input_as_parameter(self, data): """ Set the input path and log path based on data (a fasta filepath) """ self.Parameters['-i'].on(data) # access data through self.Parameters so we know it's been cast # to a FilePath input_filepath = self.Parameters['-i'].Value input_file_dir, input_filename = split(input_filepath) input_file_base, input_file_ext = splitext(input_filename) # FIXME: the following all other options # formatdb ignores the working directory if not name is passed. self.Parameters['-l'].on(FilePath('%s.log') % input_filename) self.Parameters['-n'].on(FilePath(input_filename)) return ''
python
def _input_as_parameter(self, data): """ Set the input path and log path based on data (a fasta filepath) """ self.Parameters['-i'].on(data) # access data through self.Parameters so we know it's been cast # to a FilePath input_filepath = self.Parameters['-i'].Value input_file_dir, input_filename = split(input_filepath) input_file_base, input_file_ext = splitext(input_filename) # FIXME: the following all other options # formatdb ignores the working directory if not name is passed. self.Parameters['-l'].on(FilePath('%s.log') % input_filename) self.Parameters['-n'].on(FilePath(input_filename)) return ''
[ "def", "_input_as_parameter", "(", "self", ",", "data", ")", ":", "self", ".", "Parameters", "[", "'-i'", "]", ".", "on", "(", "data", ")", "# access data through self.Parameters so we know it's been cast", "# to a FilePath", "input_filepath", "=", "self", ".", "Par...
Set the input path and log path based on data (a fasta filepath)
[ "Set", "the", "input", "path", "and", "log", "path", "based", "on", "data", "(", "a", "fasta", "filepath", ")" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L45-L58
biocore/burrito-fillings
bfillings/formatdb.py
FormatDb._get_result_paths
def _get_result_paths(self, data): """ Build the dict of result filepaths """ # access data through self.Parameters so we know it's been cast # to a FilePath wd = self.WorkingDir db_name = self.Parameters['-n'].Value log_name = self.Parameters['-l'].Value result = {} result['log'] = ResultPath(Path=wd + log_name, IsWritten=True) if self.Parameters['-p'].Value == 'F': extensions = ['nhr', 'nin', 'nsq', 'nsd', 'nsi'] else: extensions = ['phr', 'pin', 'psq', 'psd', 'psi'] for extension in extensions: for file_path in glob(wd + (db_name + '*' + extension)): # this will match e.g. nr.01.psd and nr.psd key = file_path.split(db_name + '.')[1] result_path = ResultPath(Path=file_path, IsWritten=True) result[key] = result_path return result
python
def _get_result_paths(self, data): """ Build the dict of result filepaths """ # access data through self.Parameters so we know it's been cast # to a FilePath wd = self.WorkingDir db_name = self.Parameters['-n'].Value log_name = self.Parameters['-l'].Value result = {} result['log'] = ResultPath(Path=wd + log_name, IsWritten=True) if self.Parameters['-p'].Value == 'F': extensions = ['nhr', 'nin', 'nsq', 'nsd', 'nsi'] else: extensions = ['phr', 'pin', 'psq', 'psd', 'psi'] for extension in extensions: for file_path in glob(wd + (db_name + '*' + extension)): # this will match e.g. nr.01.psd and nr.psd key = file_path.split(db_name + '.')[1] result_path = ResultPath(Path=file_path, IsWritten=True) result[key] = result_path return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# access data through self.Parameters so we know it's been cast", "# to a FilePath", "wd", "=", "self", ".", "WorkingDir", "db_name", "=", "self", ".", "Parameters", "[", "'-n'", "]", ".", "Value", "lo...
Build the dict of result filepaths
[ "Build", "the", "dict", "of", "result", "filepaths" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L60-L80
dailymuse/oz
oz/redis_sessions/middleware.py
RedisSessionMiddleware._session_key
def _session_key(self): """Gets the redis key for a session""" if not hasattr(self, "_cached_session_key"): session_id_bytes = self.get_secure_cookie("session_id") session_id = None if session_id_bytes: try: session_id = session_id_bytes.decode('utf-8') except: pass if not session_id: session_id = oz.redis_sessions.random_hex(20) session_time = oz.settings["session_time"] kwargs = dict( name="session_id", value=session_id.encode('utf-8'), domain=oz.settings.get("cookie_domain"), httponly=True, ) if session_time: kwargs["expires_days"] = round(session_time/60/60/24) self.set_secure_cookie(**kwargs) password_salt = oz.settings["session_salt"] self._cached_session_key = "session:%s:v4" % oz.redis_sessions.password_hash(session_id, password_salt=password_salt) return self._cached_session_key
python
def _session_key(self): """Gets the redis key for a session""" if not hasattr(self, "_cached_session_key"): session_id_bytes = self.get_secure_cookie("session_id") session_id = None if session_id_bytes: try: session_id = session_id_bytes.decode('utf-8') except: pass if not session_id: session_id = oz.redis_sessions.random_hex(20) session_time = oz.settings["session_time"] kwargs = dict( name="session_id", value=session_id.encode('utf-8'), domain=oz.settings.get("cookie_domain"), httponly=True, ) if session_time: kwargs["expires_days"] = round(session_time/60/60/24) self.set_secure_cookie(**kwargs) password_salt = oz.settings["session_salt"] self._cached_session_key = "session:%s:v4" % oz.redis_sessions.password_hash(session_id, password_salt=password_salt) return self._cached_session_key
[ "def", "_session_key", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_cached_session_key\"", ")", ":", "session_id_bytes", "=", "self", ".", "get_secure_cookie", "(", "\"session_id\"", ")", "session_id", "=", "None", "if", "session_id_bytes...
Gets the redis key for a session
[ "Gets", "the", "redis", "key", "for", "a", "session" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/middleware.py#L12-L43
dailymuse/oz
oz/redis_sessions/middleware.py
RedisSessionMiddleware._update_session_expiration
def _update_session_expiration(self): """ Updates a redis item to expire later since it has been interacted with recently """ session_time = oz.settings["session_time"] if session_time: self.redis().expire(self._session_key, session_time)
python
def _update_session_expiration(self): """ Updates a redis item to expire later since it has been interacted with recently """ session_time = oz.settings["session_time"] if session_time: self.redis().expire(self._session_key, session_time)
[ "def", "_update_session_expiration", "(", "self", ")", ":", "session_time", "=", "oz", ".", "settings", "[", "\"session_time\"", "]", "if", "session_time", ":", "self", ".", "redis", "(", ")", ".", "expire", "(", "self", ".", "_session_key", ",", "session_ti...
Updates a redis item to expire later since it has been interacted with recently
[ "Updates", "a", "redis", "item", "to", "expire", "later", "since", "it", "has", "been", "interacted", "with", "recently" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/middleware.py#L45-L54
dailymuse/oz
oz/redis_sessions/middleware.py
RedisSessionMiddleware.get_session_value
def get_session_value(self, name, default=None): """Gets a session value""" value = self.redis().hget(self._session_key, name) or default self._update_session_expiration() return value
python
def get_session_value(self, name, default=None): """Gets a session value""" value = self.redis().hget(self._session_key, name) or default self._update_session_expiration() return value
[ "def", "get_session_value", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "value", "=", "self", ".", "redis", "(", ")", ".", "hget", "(", "self", ".", "_session_key", ",", "name", ")", "or", "default", "self", ".", "_update_session_e...
Gets a session value
[ "Gets", "a", "session", "value" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/middleware.py#L56-L61
dailymuse/oz
oz/redis_sessions/middleware.py
RedisSessionMiddleware.set_session_value
def set_session_value(self, name, value): """Sets a session value""" self.redis().hset(self._session_key, name, value) self._update_session_expiration()
python
def set_session_value(self, name, value): """Sets a session value""" self.redis().hset(self._session_key, name, value) self._update_session_expiration()
[ "def", "set_session_value", "(", "self", ",", "name", ",", "value", ")", ":", "self", ".", "redis", "(", ")", ".", "hset", "(", "self", ".", "_session_key", ",", "name", ",", "value", ")", "self", ".", "_update_session_expiration", "(", ")" ]
Sets a session value
[ "Sets", "a", "session", "value" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/middleware.py#L63-L67
dailymuse/oz
oz/redis_sessions/middleware.py
RedisSessionMiddleware.clear_session_value
def clear_session_value(self, name): """Removes a session value""" self.redis().hdel(self._session_key, name) self._update_session_expiration()
python
def clear_session_value(self, name): """Removes a session value""" self.redis().hdel(self._session_key, name) self._update_session_expiration()
[ "def", "clear_session_value", "(", "self", ",", "name", ")", ":", "self", ".", "redis", "(", ")", ".", "hdel", "(", "self", ".", "_session_key", ",", "name", ")", "self", ".", "_update_session_expiration", "(", ")" ]
Removes a session value
[ "Removes", "a", "session", "value" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/redis_sessions/middleware.py#L69-L72
michaelpb/omnic
omnic/web/security.py
rewrite_middleware
async def rewrite_middleware(server, request): ''' Sanic middleware that utilizes a security class's "rewrite" method to check ''' if singletons.settings.SECURITY is not None: security_class = singletons.settings.load('SECURITY') else: security_class = DummySecurity security = security_class() try: new_path = await security.rewrite(request) except SecurityException as e: msg = '' if DEBUG: msg = str(e) return server.response.text(msg, status=400) request.path = new_path
python
async def rewrite_middleware(server, request): ''' Sanic middleware that utilizes a security class's "rewrite" method to check ''' if singletons.settings.SECURITY is not None: security_class = singletons.settings.load('SECURITY') else: security_class = DummySecurity security = security_class() try: new_path = await security.rewrite(request) except SecurityException as e: msg = '' if DEBUG: msg = str(e) return server.response.text(msg, status=400) request.path = new_path
[ "async", "def", "rewrite_middleware", "(", "server", ",", "request", ")", ":", "if", "singletons", ".", "settings", ".", "SECURITY", "is", "not", "None", ":", "security_class", "=", "singletons", ".", "settings", ".", "load", "(", "'SECURITY'", ")", "else", ...
Sanic middleware that utilizes a security class's "rewrite" method to check
[ "Sanic", "middleware", "that", "utilizes", "a", "security", "class", "s", "rewrite", "method", "to", "check" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/web/security.py#L66-L83
mickybart/python-atlasapi
atlasapi/network.py
Network.answer
def answer(self, c, details): """Answer will provide all necessary feedback for the caller Args: c (int): HTTP Code details (dict): Response payload Returns: dict: Response payload Raises: ErrAtlasBadRequest ErrAtlasUnauthorized ErrAtlasForbidden ErrAtlasNotFound ErrAtlasMethodNotAllowed ErrAtlasConflict ErrAtlasServerErrors """ if c in [Settings.SUCCESS, Settings.CREATED, Settings.ACCEPTED]: return details elif c == Settings.BAD_REQUEST: raise ErrAtlasBadRequest(c, details) elif c == Settings.UNAUTHORIZED: raise ErrAtlasUnauthorized(c, details) elif c == Settings.FORBIDDEN: raise ErrAtlasForbidden(c, details) elif c == Settings.NOTFOUND: raise ErrAtlasNotFound(c, details) elif c == Settings.METHOD_NOT_ALLOWED: raise ErrAtlasMethodNotAllowed(c, details) elif c == Settings.CONFLICT: raise ErrAtlasConflict(c, details) else: # Settings.SERVER_ERRORS raise ErrAtlasServerErrors(c, details)
python
def answer(self, c, details): """Answer will provide all necessary feedback for the caller Args: c (int): HTTP Code details (dict): Response payload Returns: dict: Response payload Raises: ErrAtlasBadRequest ErrAtlasUnauthorized ErrAtlasForbidden ErrAtlasNotFound ErrAtlasMethodNotAllowed ErrAtlasConflict ErrAtlasServerErrors """ if c in [Settings.SUCCESS, Settings.CREATED, Settings.ACCEPTED]: return details elif c == Settings.BAD_REQUEST: raise ErrAtlasBadRequest(c, details) elif c == Settings.UNAUTHORIZED: raise ErrAtlasUnauthorized(c, details) elif c == Settings.FORBIDDEN: raise ErrAtlasForbidden(c, details) elif c == Settings.NOTFOUND: raise ErrAtlasNotFound(c, details) elif c == Settings.METHOD_NOT_ALLOWED: raise ErrAtlasMethodNotAllowed(c, details) elif c == Settings.CONFLICT: raise ErrAtlasConflict(c, details) else: # Settings.SERVER_ERRORS raise ErrAtlasServerErrors(c, details)
[ "def", "answer", "(", "self", ",", "c", ",", "details", ")", ":", "if", "c", "in", "[", "Settings", ".", "SUCCESS", ",", "Settings", ".", "CREATED", ",", "Settings", ".", "ACCEPTED", "]", ":", "return", "details", "elif", "c", "==", "Settings", ".", ...
Answer will provide all necessary feedback for the caller Args: c (int): HTTP Code details (dict): Response payload Returns: dict: Response payload Raises: ErrAtlasBadRequest ErrAtlasUnauthorized ErrAtlasForbidden ErrAtlasNotFound ErrAtlasMethodNotAllowed ErrAtlasConflict ErrAtlasServerErrors
[ "Answer", "will", "provide", "all", "necessary", "feedback", "for", "the", "caller", "Args", ":", "c", "(", "int", ")", ":", "HTTP", "Code", "details", "(", "dict", ")", ":", "Response", "payload", "Returns", ":", "dict", ":", "Response", "payload", "Rai...
train
https://github.com/mickybart/python-atlasapi/blob/2962c37740998694cb55f82b375b81cc604b953e/atlasapi/network.py#L37-L73
mickybart/python-atlasapi
atlasapi/network.py
Network.get
def get(self, uri): """Get request Args: uri (str): URI Returns: Json: API response Raises: Exception: Network issue """ r = None try: r = requests.get(uri, allow_redirects=True, timeout=Settings.requests_timeout, headers={}, auth=HTTPDigestAuth(self.user, self.password)) return self.answer(r.status_code, r.json()) except: raise finally: if r: r.connection.close()
python
def get(self, uri): """Get request Args: uri (str): URI Returns: Json: API response Raises: Exception: Network issue """ r = None try: r = requests.get(uri, allow_redirects=True, timeout=Settings.requests_timeout, headers={}, auth=HTTPDigestAuth(self.user, self.password)) return self.answer(r.status_code, r.json()) except: raise finally: if r: r.connection.close()
[ "def", "get", "(", "self", ",", "uri", ")", ":", "r", "=", "None", "try", ":", "r", "=", "requests", ".", "get", "(", "uri", ",", "allow_redirects", "=", "True", ",", "timeout", "=", "Settings", ".", "requests_timeout", ",", "headers", "=", "{", "}...
Get request Args: uri (str): URI Returns: Json: API response Raises: Exception: Network issue
[ "Get", "request", "Args", ":", "uri", "(", "str", ")", ":", "URI", "Returns", ":", "Json", ":", "API", "response", "Raises", ":", "Exception", ":", "Network", "issue" ]
train
https://github.com/mickybart/python-atlasapi/blob/2962c37740998694cb55f82b375b81cc604b953e/atlasapi/network.py#L75-L100
kejbaly2/metrique
metrique/cubes/gitdata/commit.py
Commit.get_objects
def get_objects(self, uri, pull=True, **kwargs): ''' Walk through repo commits to generate a list of repo commit objects. Each object has the following properties: * repo uri * general commit info * files added, removed fnames * lines added, removed * acked_by * signed_off_by * resolves * related ''' self.repo = repo = git_clone(uri, pull=pull, reflect=True) # get a full list of all commit SHAs in the repo (all branches) cmd = 'git rev-list --all' output = sys_call(cmd, cwd=repo.path) repo_shas = set(x.strip() for x in output.split('\n') if x) logger.debug("Total Commits: %s" % len(repo_shas)) cmd = 'git --no-pager log --all --format=sha:%H --numstat' output = sys_call(cmd) all_logs = re.sub('\n+', '\n', output) c_logs = [x for x in [s.strip() for s in all_logs.split('sha:')] if x] _end = None # once was true, always is true... objs = [] for c_log in c_logs: sha, s, all_changes = c_log.partition('\n') #try: c = repo.get_object(sha) # FIXME: not normalizing to UTC _start = ts2dt(c.commit_time) #except Exception as e: # _start = now # obj = dict(_oid=sha, _start=_start, _end=_end, # repo_uri=uri, _e={sha: to_encoding(e)}) # self.objects.add(obj) # continue # and some basic stuff... obj = dict(_oid=sha, _start=_start, _end=_end, repo_uri=uri, tree=c.tree, parents=c.parents, author=c.author, committer=c.committer, author_time=c.author_time, message=c.message, mergetag=c.mergetag, extra=c.extra) for _file in all_changes.split('\n'): _file = _file.strip() obj.setdefault('files', {}) if not _file: added, removed, fname = 0, 0, None else: added, removed, fname = _file.split('\t') added = 0 if added == '-' else int(added) removed = 0 if removed == '-' else int(removed) # FIXME: sql doesn't nest well.. 
changes = {'added': added, 'removed': removed} obj['files'][fname] = changes # file +/- totals obj['added'] = sum( [v.get('added', 0) for v in obj['files'].itervalues()]) obj['removed'] = sum( [v.get('removed', 0) for v in obj['files'].itervalues()]) # extract interesting bits from the message obj['acked_by'] = acked_by_re.findall(c.message) obj['signed_off_by'] = signed_off_by_re.findall(c.message) obj['resolves'] = resolves_re.findall(c.message) obj['related'] = related_re.findall(c.message) objs.append(obj) self.objects.extend(objs) return super(Commit, self).get_objects(**kwargs)
python
def get_objects(self, uri, pull=True, **kwargs): ''' Walk through repo commits to generate a list of repo commit objects. Each object has the following properties: * repo uri * general commit info * files added, removed fnames * lines added, removed * acked_by * signed_off_by * resolves * related ''' self.repo = repo = git_clone(uri, pull=pull, reflect=True) # get a full list of all commit SHAs in the repo (all branches) cmd = 'git rev-list --all' output = sys_call(cmd, cwd=repo.path) repo_shas = set(x.strip() for x in output.split('\n') if x) logger.debug("Total Commits: %s" % len(repo_shas)) cmd = 'git --no-pager log --all --format=sha:%H --numstat' output = sys_call(cmd) all_logs = re.sub('\n+', '\n', output) c_logs = [x for x in [s.strip() for s in all_logs.split('sha:')] if x] _end = None # once was true, always is true... objs = [] for c_log in c_logs: sha, s, all_changes = c_log.partition('\n') #try: c = repo.get_object(sha) # FIXME: not normalizing to UTC _start = ts2dt(c.commit_time) #except Exception as e: # _start = now # obj = dict(_oid=sha, _start=_start, _end=_end, # repo_uri=uri, _e={sha: to_encoding(e)}) # self.objects.add(obj) # continue # and some basic stuff... obj = dict(_oid=sha, _start=_start, _end=_end, repo_uri=uri, tree=c.tree, parents=c.parents, author=c.author, committer=c.committer, author_time=c.author_time, message=c.message, mergetag=c.mergetag, extra=c.extra) for _file in all_changes.split('\n'): _file = _file.strip() obj.setdefault('files', {}) if not _file: added, removed, fname = 0, 0, None else: added, removed, fname = _file.split('\t') added = 0 if added == '-' else int(added) removed = 0 if removed == '-' else int(removed) # FIXME: sql doesn't nest well.. 
changes = {'added': added, 'removed': removed} obj['files'][fname] = changes # file +/- totals obj['added'] = sum( [v.get('added', 0) for v in obj['files'].itervalues()]) obj['removed'] = sum( [v.get('removed', 0) for v in obj['files'].itervalues()]) # extract interesting bits from the message obj['acked_by'] = acked_by_re.findall(c.message) obj['signed_off_by'] = signed_off_by_re.findall(c.message) obj['resolves'] = resolves_re.findall(c.message) obj['related'] = related_re.findall(c.message) objs.append(obj) self.objects.extend(objs) return super(Commit, self).get_objects(**kwargs)
[ "def", "get_objects", "(", "self", ",", "uri", ",", "pull", "=", "True", ",", "*", "*", "kwargs", ")", ":", "self", ".", "repo", "=", "repo", "=", "git_clone", "(", "uri", ",", "pull", "=", "pull", ",", "reflect", "=", "True", ")", "# get a full li...
Walk through repo commits to generate a list of repo commit objects. Each object has the following properties: * repo uri * general commit info * files added, removed fnames * lines added, removed * acked_by * signed_off_by * resolves * related
[ "Walk", "through", "repo", "commits", "to", "generate", "a", "list", "of", "repo", "commit", "objects", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/cubes/gitdata/commit.py#L54-L131
zardus/idalink
idalink/memory.py
_dict_values_sorted_by_key
def _dict_values_sorted_by_key(dictionary): # This should be a yield from instead. """Internal helper to return the values of a dictionary, sorted by key. """ for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)): yield value
python
def _dict_values_sorted_by_key(dictionary): # This should be a yield from instead. """Internal helper to return the values of a dictionary, sorted by key. """ for _, value in sorted(dictionary.iteritems(), key=operator.itemgetter(0)): yield value
[ "def", "_dict_values_sorted_by_key", "(", "dictionary", ")", ":", "# This should be a yield from instead.", "for", "_", ",", "value", "in", "sorted", "(", "dictionary", ".", "iteritems", "(", ")", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ...
Internal helper to return the values of a dictionary, sorted by key.
[ "Internal", "helper", "to", "return", "the", "values", "of", "a", "dictionary", "sorted", "by", "key", "." ]
train
https://github.com/zardus/idalink/blob/cf68144e7c72679a5429d8b8d9e9aa316d9b79ac/idalink/memory.py#L19-L24
zardus/idalink
idalink/memory.py
_ondemand
def _ondemand(f): """Decorator to only request information if not in cache already. """ name = f.__name__ def func(self, *args, **kwargs): if not args and not kwargs: if hasattr(self, '_%s' % name): return getattr(self, '_%s' % name) a = f(self, *args, **kwargs) setattr(self, '_%s' % name, a) return a else: return f(self, *args, **kwargs) func.__name__ = name return func
python
def _ondemand(f): """Decorator to only request information if not in cache already. """ name = f.__name__ def func(self, *args, **kwargs): if not args and not kwargs: if hasattr(self, '_%s' % name): return getattr(self, '_%s' % name) a = f(self, *args, **kwargs) setattr(self, '_%s' % name, a) return a else: return f(self, *args, **kwargs) func.__name__ = name return func
[ "def", "_ondemand", "(", "f", ")", ":", "name", "=", "f", ".", "__name__", "def", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", "and", "not", "kwargs", ":", "if", "hasattr", "(", "self", ",", "'_...
Decorator to only request information if not in cache already.
[ "Decorator", "to", "only", "request", "information", "if", "not", "in", "cache", "already", "." ]
train
https://github.com/zardus/idalink/blob/cf68144e7c72679a5429d8b8d9e9aa316d9b79ac/idalink/memory.py#L27-L43
zardus/idalink
idalink/memory.py
CachedIDAMemory.get_memory
def get_memory(self, start, size): """Retrieve an area of memory from IDA. Returns a sparse dictionary of address -> value. """ LOG.debug('get_memory: %d bytes from %x', size, start) return get_memory(self.ida.idaapi, start, size, default_byte=self.default_byte)
python
def get_memory(self, start, size): """Retrieve an area of memory from IDA. Returns a sparse dictionary of address -> value. """ LOG.debug('get_memory: %d bytes from %x', size, start) return get_memory(self.ida.idaapi, start, size, default_byte=self.default_byte)
[ "def", "get_memory", "(", "self", ",", "start", ",", "size", ")", ":", "LOG", ".", "debug", "(", "'get_memory: %d bytes from %x'", ",", "size", ",", "start", ")", "return", "get_memory", "(", "self", ".", "ida", ".", "idaapi", ",", "start", ",", "size", ...
Retrieve an area of memory from IDA. Returns a sparse dictionary of address -> value.
[ "Retrieve", "an", "area", "of", "memory", "from", "IDA", ".", "Returns", "a", "sparse", "dictionary", "of", "address", "-", ">", "value", "." ]
train
https://github.com/zardus/idalink/blob/cf68144e7c72679a5429d8b8d9e9aa316d9b79ac/idalink/memory.py#L283-L289
kejbaly2/metrique
metrique/cubes/osinfo/rpm.py
Rpm.get_objects
def get_objects(self, **kwargs): ''' Run `rpm -q` command on a {local, remote} system to get back details of installed RPMs. Default rpm details extracted are as follows: * name * version * release * arch * nvra * license * os * packager * platform * sourcepackage * sourcerpm * summary ''' fmt = ':::'.join('%%{%s}' % f for f in self._fields) if self.ssh_host: output = self._ssh_cmd(fmt) else: output = self._local_cmd(fmt) if isinstance(output, basestring): output = unicode(output, 'utf-8') output = output.strip().split('\n') lines = [l.strip().split(':::') for l in output] now = utcnow() host = self.ssh_host or socket.gethostname() for line in lines: obj = {'host': host, '_start': now} for i, item in enumerate(line): if item == '(none)': item = None obj[self._fields[i]] = item obj['_oid'] = '%s__%s' % (host, obj['nvra']) self.objects.add(obj) return super(Rpm, self).get_objects(**kwargs)
python
def get_objects(self, **kwargs): ''' Run `rpm -q` command on a {local, remote} system to get back details of installed RPMs. Default rpm details extracted are as follows: * name * version * release * arch * nvra * license * os * packager * platform * sourcepackage * sourcerpm * summary ''' fmt = ':::'.join('%%{%s}' % f for f in self._fields) if self.ssh_host: output = self._ssh_cmd(fmt) else: output = self._local_cmd(fmt) if isinstance(output, basestring): output = unicode(output, 'utf-8') output = output.strip().split('\n') lines = [l.strip().split(':::') for l in output] now = utcnow() host = self.ssh_host or socket.gethostname() for line in lines: obj = {'host': host, '_start': now} for i, item in enumerate(line): if item == '(none)': item = None obj[self._fields[i]] = item obj['_oid'] = '%s__%s' % (host, obj['nvra']) self.objects.add(obj) return super(Rpm, self).get_objects(**kwargs)
[ "def", "get_objects", "(", "self", ",", "*", "*", "kwargs", ")", ":", "fmt", "=", "':::'", ".", "join", "(", "'%%{%s}'", "%", "f", "for", "f", "in", "self", ".", "_fields", ")", "if", "self", ".", "ssh_host", ":", "output", "=", "self", ".", "_ss...
Run `rpm -q` command on a {local, remote} system to get back details of installed RPMs. Default rpm details extracted are as follows: * name * version * release * arch * nvra * license * os * packager * platform * sourcepackage * sourcerpm * summary
[ "Run", "rpm", "-", "q", "command", "on", "a", "{", "local", "remote", "}", "system", "to", "get", "back", "details", "of", "installed", "RPMs", "." ]
train
https://github.com/kejbaly2/metrique/blob/a10b076097441b7dde687949139f702f5c1e1b35/metrique/cubes/osinfo/rpm.py#L79-L117
EndurantDevs/webargs-sanic
examples/user_simple_storage/extentions/exceptions.py
handle_404
def handle_404(request, exception): '''Handle 404 Not Found This handler should be used to handle error http 404 not found for all endpoints or if resource not available. ''' error = format_error(title='Resource not found', detail=str(exception)) return json(return_an_error(error), status=HTTPStatus.NOT_FOUND)
python
def handle_404(request, exception): '''Handle 404 Not Found This handler should be used to handle error http 404 not found for all endpoints or if resource not available. ''' error = format_error(title='Resource not found', detail=str(exception)) return json(return_an_error(error), status=HTTPStatus.NOT_FOUND)
[ "def", "handle_404", "(", "request", ",", "exception", ")", ":", "error", "=", "format_error", "(", "title", "=", "'Resource not found'", ",", "detail", "=", "str", "(", "exception", ")", ")", "return", "json", "(", "return_an_error", "(", "error", ")", ",...
Handle 404 Not Found This handler should be used to handle error http 404 not found for all endpoints or if resource not available.
[ "Handle", "404", "Not", "Found", "This", "handler", "should", "be", "used", "to", "handle", "error", "http", "404", "not", "found", "for", "all", "endpoints", "or", "if", "resource", "not", "available", "." ]
train
https://github.com/EndurantDevs/webargs-sanic/blob/8861a3b7d16d43a0b7e6669115eb93b0553f1b63/examples/user_simple_storage/extentions/exceptions.py#L15-L21
dailymuse/oz
oz/__init__.py
_add_to_dict
def _add_to_dict(t, container, name, value): """ Adds an item to a dictionary, or raises an exception if an item with the specified key already exists in the dictionary. """ if name in container: raise Exception("%s '%s' already exists" % (t, name)) else: container[name] = value
python
def _add_to_dict(t, container, name, value): """ Adds an item to a dictionary, or raises an exception if an item with the specified key already exists in the dictionary. """ if name in container: raise Exception("%s '%s' already exists" % (t, name)) else: container[name] = value
[ "def", "_add_to_dict", "(", "t", ",", "container", ",", "name", ",", "value", ")", ":", "if", "name", "in", "container", ":", "raise", "Exception", "(", "\"%s '%s' already exists\"", "%", "(", "t", ",", "name", ")", ")", "else", ":", "container", "[", ...
Adds an item to a dictionary, or raises an exception if an item with the specified key already exists in the dictionary.
[ "Adds", "an", "item", "to", "a", "dictionary", "or", "raises", "an", "exception", "if", "an", "item", "with", "the", "specified", "key", "already", "exists", "in", "the", "dictionary", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/__init__.py#L41-L50
dailymuse/oz
oz/__init__.py
initialize
def initialize(config=None): """Initializes oz""" # Load the config file if config == None: config = {} config_source = None try: with open(os.environ.get("OZ_CONFIG", "config.py")) as f: config_source = f.read() except Exception: tornado.log.gen_log.info("Could not read config.py", exc_info=True) if config_source != None: tornado.util.exec_in(config_source, config, config) # Load the plugins for p in config.get("plugins", ["oz.core"]): plugin(p) # Set the options for key, value in config.get("app_options", {}).items(): setattr(tornado.options.options, key, value) # Generate the application settings global settings settings = tornado.options.options.as_dict() settings["ui_modules"] = _uimodules settings["project_name"] = config.get("project_name")
python
def initialize(config=None): """Initializes oz""" # Load the config file if config == None: config = {} config_source = None try: with open(os.environ.get("OZ_CONFIG", "config.py")) as f: config_source = f.read() except Exception: tornado.log.gen_log.info("Could not read config.py", exc_info=True) if config_source != None: tornado.util.exec_in(config_source, config, config) # Load the plugins for p in config.get("plugins", ["oz.core"]): plugin(p) # Set the options for key, value in config.get("app_options", {}).items(): setattr(tornado.options.options, key, value) # Generate the application settings global settings settings = tornado.options.options.as_dict() settings["ui_modules"] = _uimodules settings["project_name"] = config.get("project_name")
[ "def", "initialize", "(", "config", "=", "None", ")", ":", "# Load the config file", "if", "config", "==", "None", ":", "config", "=", "{", "}", "config_source", "=", "None", "try", ":", "with", "open", "(", "os", ".", "environ", ".", "get", "(", "\"OZ...
Initializes oz
[ "Initializes", "oz" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/__init__.py#L167-L196
dailymuse/oz
oz/__init__.py
RequestHandler.trigger
def trigger(self, name, *args, **kwargs): """ Triggers an event to run through middleware. This method will execute a chain of relevant trigger callbacks, until one of the callbacks returns the `break_trigger`. """ # Relevant middleware is cached so we don't have to rediscover it # every time. Fetch the cached value if possible. listeners = self._triggers.get(name, []) # Execute each piece of middleware for listener in listeners: result = listener(*args, **kwargs) if result == break_trigger: return False return True
python
def trigger(self, name, *args, **kwargs): """ Triggers an event to run through middleware. This method will execute a chain of relevant trigger callbacks, until one of the callbacks returns the `break_trigger`. """ # Relevant middleware is cached so we don't have to rediscover it # every time. Fetch the cached value if possible. listeners = self._triggers.get(name, []) # Execute each piece of middleware for listener in listeners: result = listener(*args, **kwargs) if result == break_trigger: return False return True
[ "def", "trigger", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Relevant middleware is cached so we don't have to rediscover it", "# every time. Fetch the cached value if possible.", "listeners", "=", "self", ".", "_triggers", ".", "g...
Triggers an event to run through middleware. This method will execute a chain of relevant trigger callbacks, until one of the callbacks returns the `break_trigger`.
[ "Triggers", "an", "event", "to", "run", "through", "middleware", ".", "This", "method", "will", "execute", "a", "chain", "of", "relevant", "trigger", "callbacks", "until", "one", "of", "the", "callbacks", "returns", "the", "break_trigger", "." ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/__init__.py#L106-L125
michaelpb/omnic
omnic/types/resource.py
Resource.cache_makedirs
def cache_makedirs(self, subdir=None): ''' Make necessary directories to hold cache value ''' if subdir is not None: dirname = self.cache_path if subdir: dirname = os.path.join(dirname, subdir) else: dirname = os.path.dirname(self.cache_path) os.makedirs(dirname, exist_ok=True)
python
def cache_makedirs(self, subdir=None): ''' Make necessary directories to hold cache value ''' if subdir is not None: dirname = self.cache_path if subdir: dirname = os.path.join(dirname, subdir) else: dirname = os.path.dirname(self.cache_path) os.makedirs(dirname, exist_ok=True)
[ "def", "cache_makedirs", "(", "self", ",", "subdir", "=", "None", ")", ":", "if", "subdir", "is", "not", "None", ":", "dirname", "=", "self", ".", "cache_path", "if", "subdir", ":", "dirname", "=", "os", ".", "path", ".", "join", "(", "dirname", ",",...
Make necessary directories to hold cache value
[ "Make", "necessary", "directories", "to", "hold", "cache", "value" ]
train
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/types/resource.py#L63-L73
dailymuse/oz
oz/sqlalchemy/__init__.py
session
def session(connection_string=None):
    """Gets a SQLAlchemy session.

    Falls back to the connection string in ``oz.settings["db"]`` when
    none is supplied.  One sessionmaker is built (and cached in
    ``_session_makers``) per connection string; every call returns a
    fresh session from that maker.
    """
    conn_str = connection_string or oz.settings["db"]
    maker = _session_makers.get(conn_str)
    if maker is None:
        maker = sessionmaker(bind=engine(connection_string=conn_str))
        _session_makers[conn_str] = maker
    return maker()
python
def session(connection_string=None): """Gets a SQLAlchemy session""" global _session_makers connection_string = connection_string or oz.settings["db"] if not connection_string in _session_makers: _session_makers[connection_string] = sessionmaker(bind=engine(connection_string=connection_string)) return _session_makers[connection_string]()
[ "def", "session", "(", "connection_string", "=", "None", ")", ":", "global", "_session_makers", "connection_string", "=", "connection_string", "or", "oz", ".", "settings", "[", "\"db\"", "]", "if", "not", "connection_string", "in", "_session_makers", ":", "_sessio...
Gets a SQLAlchemy session
[ "Gets", "a", "SQLAlchemy", "session" ]
train
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/__init__.py#L69-L77
biocore/burrito-fillings
bfillings/blat.py
assign_reads_to_database
def assign_reads_to_database(query_fasta_fp, database_fasta_fp, output_fp,
                             params=None):
    """Assign a set of query sequences to a reference database

    query_fasta_fp : absolute file path to query sequences
    database_fasta_fp : absolute file path to the reference database
    output_fp : absolute file path of the output file to write
    params : dict of BLAT specific parameters.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST
    parsers.
    """
    # Work on a copy: the previous version mutated the caller's dict
    # when injecting the default output format.
    params = {} if params is None else dict(params)
    params.setdefault('-out', 'blast9')

    blat = Blat(params=params)
    result = blat([query_fasta_fp, database_fasta_fp, output_fp])
    return result['output']
python
def assign_reads_to_database(query_fasta_fp, database_fasta_fp, output_fp, params=None): """Assign a set of query sequences to a reference database query_fasta_fp : absolute file path to query sequences database_fasta_fp : absolute file path to the reference database output_fp : absolute file path of the output file to write params : dict of BLAT specific parameters. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers. """ if params is None: params = {} if '-out' not in params: params['-out'] = 'blast9' blat = Blat(params=params) result = blat([query_fasta_fp, database_fasta_fp, output_fp]) return result['output']
[ "def", "assign_reads_to_database", "(", "query_fasta_fp", ",", "database_fasta_fp", ",", "output_fp", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "if", "'-out'", "not", "in", "params", ":", "params", "[...
Assign a set of query sequences to a reference database query_fasta_fp : absolute file path to query sequences database_fasta_fp : absolute file path to the reference database output_fp : absolute file path of the output file to write params : dict of BLAT specific parameters. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
[ "Assign", "a", "set", "of", "query", "sequences", "to", "a", "reference", "database" ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L285-L304
biocore/burrito-fillings
bfillings/blat.py
assign_dna_reads_to_dna_database
def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp,
                                     output_fp, params=None):
    """Assign DNA reads to a database fasta of DNA sequences.

    A thin wrapper around assign_reads_to_database that pins both the
    database (-t) and query (-q) types to 'dna'.  All other settings
    keep their defaults unless overridden via ``params``.

    query_fasta_fp: absolute path to the query fasta file containing
        DNA sequences.
    database_fasta_fp: absolute path to the database fasta file
        containing DNA sequences.
    output_fp: absolute path where the output file will be generated.
    params: optional dict of parameter overrides.  May not contain
        '-t' or '-q'; ApplicationError is raised if it does.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST
    parsers.
    """
    if params is None:
        params = {}

    # Callers may tweak anything except the two type flags this
    # wrapper exists to fix.
    if '-t' in params or '-q' in params:
        raise ApplicationError("Cannot change database or query types when "
                               "using assign_dna_reads_to_dna_database. "
                               "Use assign_reads_to_database instead.\n")

    merged = {'-t': 'dna', '-q': 'dna'}
    merged.update(params)

    return assign_reads_to_database(query_fasta_fp, database_fasta_fp,
                                    output_fp, merged)
python
def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp, output_fp, params=None): """Assign DNA reads to a database fasta of DNA sequences. Wraps assign_reads_to_database, setting database and query types. All parameters are set to default unless params is passed. query_fasta_fp: absolute path to the query fasta file containing DNA sequences. database_fasta_fp: absolute path to the database fasta file containing DNA sequences. output_fp: absolute path where the output file will be generated. params: optional. dict containing parameter settings to be used instead of default values. Cannot change database or query file types from dna and dna, respectively. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers. """ if params is None: params = {} my_params = {'-t': 'dna', '-q': 'dna' } # if the user specified parameters other than default, then use them. # However, if they try to change the database or query types, raise an # applciation error. if '-t' in params or '-q' in params: raise ApplicationError("Cannot change database or query types when " + "using assign_dna_reads_to_dna_database. " + "Use assign_reads_to_database instead.\n") my_params.update(params) result = assign_reads_to_database(query_fasta_fp, database_fasta_fp, output_fp, my_params) return result
[ "def", "assign_dna_reads_to_dna_database", "(", "query_fasta_fp", ",", "database_fasta_fp", ",", "output_fp", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "my_params", "=", "{", "'-t'", ":", "'dna'", ",", ...
Assign DNA reads to a database fasta of DNA sequences. Wraps assign_reads_to_database, setting database and query types. All parameters are set to default unless params is passed. query_fasta_fp: absolute path to the query fasta file containing DNA sequences. database_fasta_fp: absolute path to the database fasta file containing DNA sequences. output_fp: absolute path where the output file will be generated. params: optional. dict containing parameter settings to be used instead of default values. Cannot change database or query file types from dna and dna, respectively. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
[ "Assign", "DNA", "reads", "to", "a", "database", "fasta", "of", "DNA", "sequences", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L307-L346
biocore/burrito-fillings
bfillings/blat.py
assign_dna_reads_to_protein_database
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp,
                                         output_fp, temp_dir="/tmp",
                                         params=None):
    """Assign DNA reads to a database fasta of protein sequences.

    Wraps assign_reads_to_database, pinning both the database (-t) and
    query (-q) types to 'prot'.  A temporary fasta file holding the
    six-frame translation of the input DNA sequences is written first,
    because BLAT cannot perform that translation itself here.

    query_fasta_fp: absolute path to the query fasta file containing
        DNA sequences.
    database_fasta_fp: absolute path to the database fasta file
        containing protein sequences.
    output_fp: absolute path where the output file will be generated.
    temp_dir: optional absolute path for the translated-sequence file.
        Defaults to /tmp.
    params: optional dict of parameter overrides.  May contain a
        'genetic_code' key (NCBI code number, default 1) and any BLAT
        flag except '-t'/'-q'.

    This method returns an open file object. The output format
    defaults to blast9 and should be parsable by the PyCogent BLAST
    parsers.
    """
    # Copy so the caller's dict is never mutated ('genetic_code' is
    # popped below and defaults are merged in).
    params = {} if params is None else dict(params)
    my_params = {'-t': 'prot', '-q': 'prot'}

    # make sure temp_dir specifies an absolute path
    if not isabs(temp_dir):
        raise ApplicationError("temp_dir must be an absolute path.")

    # Database/query types are fixed by this wrapper; refuse overrides.
    # (The message previously named the *dna* wrapper by mistake.)
    if '-t' in params or '-q' in params:
        raise ApplicationError("Cannot change database or query types "
                               "when using assign_dna_reads_to_protein_database. "
                               "Use assign_reads_to_database instead.")

    my_genetic_code = GeneticCodes[params.pop('genetic_code', 1)]

    my_params.update(params)

    # Write the six-frame translation of each input DNA sequence to a
    # temporary fasta file.  os.fdopen wraps the descriptor returned by
    # mkstemp (previously it was discarded, leaking the fd).
    import os  # local import: only needed to wrap the mkstemp descriptor
    fd, tmp = mkstemp(dir=temp_dir)
    with os.fdopen(fd, 'w') as tmp_out:
        for label, sequence in parse_fasta(open(query_fasta_fp)):
            seq_id = label.split()[0]

            s = DNA.makeSequence(sequence)
            translations = dict(zip([1, 2, 3, -1, -2, -3],
                                    my_genetic_code.sixframes(s)))

            for frame, translation in sorted(translations.items()):
                tmp_out.write('>{seq_id}_frame_{frame}\n{trans}\n'.format(
                    seq_id=seq_id, frame=frame, trans=translation))

    # Remove the temp file even if BLAT fails (previously it leaked on
    # error).
    try:
        result = assign_reads_to_database(tmp, database_fasta_fp, output_fp,
                                          params=my_params)
    finally:
        remove(tmp)

    return result
python
def assign_dna_reads_to_protein_database(query_fasta_fp, database_fasta_fp, output_fp, temp_dir="/tmp", params=None): """Assign DNA reads to a database fasta of protein sequences. Wraps assign_reads_to_database, setting database and query types. All parameters are set to default unless params is passed. A temporary file must be written containing the translated sequences from the input query fasta file because BLAT cannot do this automatically. query_fasta_fp: absolute path to the query fasta file containing DNA sequences. database_fasta_fp: absolute path to the database fasta file containing protein sequences. output_fp: absolute path where the output file will be generated. temp_dir: optional. Change the location where the translated sequences will be written before being used as the query. Defaults to /tmp. params: optional. dict containing parameter settings to be used instead of default values. Cannot change database or query file types from protein and dna, respectively. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers. """ if params is None: params = {} my_params = {'-t': 'prot', '-q': 'prot'} # make sure temp_dir specifies an absolute path if not isabs(temp_dir): raise ApplicationError("temp_dir must be an absolute path.") # if the user specified parameters other than default, then use them. # However, if they try to change the database or query types, raise an # applciation error. if '-t' in params or '-q' in params: raise ApplicationError("Cannot change database or query types " "when using assign_dna_reads_to_dna_database. Use " "assign_reads_to_database instead.") if 'genetic_code' in params: my_genetic_code = GeneticCodes[params['genetic_code']] del params['genetic_code'] else: my_genetic_code = GeneticCodes[1] my_params.update(params) # get six-frame translation of the input DNA sequences and write them to # temporary file. 
_, tmp = mkstemp(dir=temp_dir) tmp_out = open(tmp, 'w') for label, sequence in parse_fasta(open(query_fasta_fp)): seq_id = label.split()[0] s = DNA.makeSequence(sequence) translations = my_genetic_code.sixframes(s) frames = [1, 2, 3, -1, -2, -3] translations = dict(zip(frames, translations)) for frame, translation in sorted(translations.iteritems()): entry = '>{seq_id}_frame_{frame}\n{trans}\n' entry = entry.format(seq_id=seq_id, frame=frame, trans=translation) tmp_out.write(entry) tmp_out.close() result = assign_reads_to_database(tmp, database_fasta_fp, output_fp, params=my_params) remove(tmp) return result
[ "def", "assign_dna_reads_to_protein_database", "(", "query_fasta_fp", ",", "database_fasta_fp", ",", "output_fp", ",", "temp_dir", "=", "\"/tmp\"", ",", "params", "=", "None", ")", ":", "if", "params", "is", "None", ":", "params", "=", "{", "}", "my_params", "...
Assign DNA reads to a database fasta of protein sequences. Wraps assign_reads_to_database, setting database and query types. All parameters are set to default unless params is passed. A temporary file must be written containing the translated sequences from the input query fasta file because BLAT cannot do this automatically. query_fasta_fp: absolute path to the query fasta file containing DNA sequences. database_fasta_fp: absolute path to the database fasta file containing protein sequences. output_fp: absolute path where the output file will be generated. temp_dir: optional. Change the location where the translated sequences will be written before being used as the query. Defaults to /tmp. params: optional. dict containing parameter settings to be used instead of default values. Cannot change database or query file types from protein and dna, respectively. This method returns an open file object. The output format defaults to blast9 and should be parsable by the PyCogent BLAST parsers.
[ "Assign", "DNA", "reads", "to", "a", "database", "fasta", "of", "protein", "sequences", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L349-L422
biocore/burrito-fillings
bfillings/blat.py
Blat._get_base_command
def _get_base_command(self): """Gets the command that will be run when the app controller is called. """ command_parts = [] cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) if self._command is None: raise ApplicationError('_command has not been set.') command = self._command parameters = sorted([str(x) for x in self.Parameters.values() if str(x)]) synonyms = self._synonyms command_parts.append(cd_command) command_parts.append(command) command_parts.append(self._database) # Positional argument command_parts.append(self._query) # Positional argument command_parts += parameters if self._output: command_parts.append(self._output.Path) # Positional return ( self._command_delimiter.join(filter(None, command_parts)).strip() )
python
def _get_base_command(self): """Gets the command that will be run when the app controller is called. """ command_parts = [] cd_command = ''.join(['cd ', str(self.WorkingDir), ';']) if self._command is None: raise ApplicationError('_command has not been set.') command = self._command parameters = sorted([str(x) for x in self.Parameters.values() if str(x)]) synonyms = self._synonyms command_parts.append(cd_command) command_parts.append(command) command_parts.append(self._database) # Positional argument command_parts.append(self._query) # Positional argument command_parts += parameters if self._output: command_parts.append(self._output.Path) # Positional return ( self._command_delimiter.join(filter(None, command_parts)).strip() )
[ "def", "_get_base_command", "(", "self", ")", ":", "command_parts", "=", "[", "]", "cd_command", "=", "''", ".", "join", "(", "[", "'cd '", ",", "str", "(", "self", ".", "WorkingDir", ")", ",", "';'", "]", ")", "if", "self", ".", "_command", "is", ...
Gets the command that will be run when the app controller is called.
[ "Gets", "the", "command", "that", "will", "be", "run", "when", "the", "app", "controller", "is", "called", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L157-L181
biocore/burrito-fillings
bfillings/blat.py
Blat._input_as_list
def _input_as_list(self, data):
    """Take the positional arguments as input in a list.

    ``data`` should be ``[query_file_path, database_file_path,
    output_file_path]``; all three must be absolute paths.

    Also validates every restricted-choice parameter (-t, -q, -mask,
    -qMask, -repeats, -out) against its allowed value set, plus the
    allowed database/query type combinations, raising ApplicationError
    on any violation.

    Returns an empty string: the three paths are emitted positionally
    by _get_base_command rather than inline here.
    """
    query, database, output = data
    if (not isabs(database)) \
            or (not isabs(query)) \
            or (not isabs(output)):
        raise ApplicationError("Only absolute paths allowed.\n%s" %
                               ', '.join(data))

    self._database = FilePath(database)
    self._query = FilePath(query)
    self._output = ResultPath(output, IsWritten=True)

    # check parameters that can only take a particular set of values

    # check combination of database and query type
    # (fixed: previously `self.Paramters['-t']` -> AttributeError
    # whenever an invalid combination was reported)
    if self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and \
        (self.Parameters['-t'].Value, self.Parameters['-q'].Value) not in \
            self._valid_combinations:
        error_message = "Invalid combination of database and query " + \
                        "types ('%s', '%s').\n" % \
                        (self.Parameters['-t'].Value,
                         self.Parameters['-q'].Value)

        error_message += "Must be one of: %s\n" % \
                         repr(self._valid_combinations)

        raise ApplicationError(error_message)

    # check database type
    if self.Parameters['-t'].isOn() and \
            self.Parameters['-t'].Value not in self._database_types:
        error_message = "Invalid database type %s\n" % \
                        self.Parameters['-t'].Value

        error_message += "Allowed values: %s\n" % \
                         ', '.join(self._database_types)

        raise ApplicationError(error_message)

    # check query type
    if self.Parameters['-q'].isOn() and \
            self.Parameters['-q'].Value not in self._query_types:
        error_message = "Invalid query type %s\n" % \
                        self.Parameters['-q'].Value

        error_message += "Allowed values: %s\n" % \
                         ', '.join(self._query_types)

        raise ApplicationError(error_message)

    # check mask type
    # (fixed: report the Value, not the Parameter object, matching the
    # other checks)
    if self.Parameters['-mask'].isOn() and \
            self.Parameters['-mask'].Value not in self._mask_types:
        error_message = "Invalid mask type %s\n" % \
                        self.Parameters['-mask'].Value

        error_message += "Allowed Values: %s\n" % \
                         ', '.join(self._mask_types)

        raise ApplicationError(error_message)

    # check qMask type
    if self.Parameters['-qMask'].isOn() and \
            self.Parameters['-qMask'].Value not in self._mask_types:
        error_message = "Invalid qMask type %s\n" % \
                        self.Parameters['-qMask'].Value

        error_message += "Allowed values: %s\n" % \
                         ', '.join(self._mask_types)

        raise ApplicationError(error_message)

    # check repeat type
    # (fixed: previously indexed `self.Parameters['-repeat']` ->
    # KeyError whenever an invalid repeat type was reported)
    if self.Parameters['-repeats'].isOn() and \
            self.Parameters['-repeats'].Value not in self._mask_types:
        error_message = "Invalid repeat type %s\n" % \
                        self.Parameters['-repeats'].Value

        error_message += "Allowed values: %s\n" % \
                         ', '.join(self._mask_types)

        raise ApplicationError(error_message)

    # check output format
    if self.Parameters['-out'].isOn() and \
            self.Parameters['-out'].Value not in self._out_types:
        error_message = "Invalid output type %s\n" % \
                        self.Parameters['-out'].Value

        error_message += "Allowed values: %s\n" % \
                         ', '.join(self._out_types)

        raise ApplicationError(error_message)

    return ''
python
def _input_as_list(self, data): '''Takes the positional arguments as input in a list. The list input here should be [query_file_path, database_file_path, output_file_path]''' query, database, output = data if (not isabs(database)) \ or (not isabs(query)) \ or (not isabs(output)): raise ApplicationError("Only absolute paths allowed.\n%s" % ', '.join(data)) self._database = FilePath(database) self._query = FilePath(query) self._output = ResultPath(output, IsWritten=True) # check parameters that can only take a particular set of values # check combination of databse and query type if self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and \ (self.Parameters['-t'].Value, self.Parameters['-q'].Value) not in \ self._valid_combinations: error_message = "Invalid combination of database and query " + \ "types ('%s', '%s').\n" % \ (self.Paramters['-t'].Value, self.Parameters['-q'].Value) error_message += "Must be one of: %s\n" % \ repr(self._valid_combinations) raise ApplicationError(error_message) # check database type if self.Parameters['-t'].isOn() and \ self.Parameters['-t'].Value not in self._database_types: error_message = "Invalid database type %s\n" % \ self.Parameters['-t'].Value error_message += "Allowed values: %s\n" % \ ', '.join(self._database_types) raise ApplicationError(error_message) # check query type if self.Parameters['-q'].isOn() and \ self.Parameters['-q'].Value not in self._query_types: error_message = "Invalid query type %s\n" % \ self.Parameters['-q'].Value error_message += "Allowed values: %s\n" % \ ', '.join(self._query_types) raise ApplicationError(error_message) # check mask type if self.Parameters['-mask'].isOn() and \ self.Parameters['-mask'].Value not in self._mask_types: error_message = "Invalid mask type %s\n" % \ self.Parameters['-mask'] error_message += "Allowed Values: %s\n" % \ ', '.join(self._mask_types) raise ApplicationError(error_message) # check qmask type if self.Parameters['-qMask'].isOn() and \ 
self.Parameters['-qMask'].Value not in self._mask_types: error_message = "Invalid qMask type %s\n" % \ self.Parameters['-qMask'].Value error_message += "Allowed values: %s\n" % \ ', '.join(self._mask_types) raise ApplicationError(error_message) # check repeat type if self.Parameters['-repeats'].isOn() and \ self.Parameters['-repeats'].Value not in self._mask_types: error_message = "Invalid repeat type %s\n" % \ self.Parameters['-repeat'].Value error_message += "Allowed values: %s\n" % \ ', '.join(self._mask_types) raise ApplicationError(error_message) # check output format if self.Parameters['-out'].isOn() and \ self.Parameters['-out'].Value not in self._out_types: error_message = "Invalid output type %s\n" % \ self.Parameters['-out'] error_message += "Allowed values: %s\n" % \ ', '.join(self._out_types) raise ApplicationError(error_message) return ''
[ "def", "_input_as_list", "(", "self", ",", "data", ")", ":", "query", ",", "database", ",", "output", "=", "data", "if", "(", "not", "isabs", "(", "database", ")", ")", "or", "(", "not", "isabs", "(", "query", ")", ")", "or", "(", "not", "isabs", ...
Takes the positional arguments as input in a list. The list input here should be [query_file_path, database_file_path, output_file_path]
[ "Takes", "the", "positional", "arguments", "as", "input", "in", "a", "list", "." ]
train
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/blat.py#L185-L282
castelao/oceansdb
oceansdb/woa.py
woa_profile_from_dap
def woa_profile_from_dap(var, d, lat, lon, depth, cfg):
    """
    Monthly Climatologic Mean and Standard Deviation from WOA,
    used either for temperature or salinity.

    INPUTS
        var: 'temperature' or 'salinity' (an optional trailing
            digit is accepted, e.g. 'temperature3')
        d: a date; only the day of the year is used
        lat: [-90<lat<90]
        lon: [-180<lon<180]
        depth: [meters]
        cfg: dict with the OPeNDAP server 'url'

    Reads the WOA Monthly Climatology NetCDF file (over DAP) and
    returns the corresponding WOA mean ('woa_an') and standard
    deviation ('woa_sd') of the requested variable, interpolated to
    the given depths.  Depths outside the WOA depth range stay masked.

    Raises ValueError if ``var`` is not recognized.
    """
    # WOA longitudes run 0..360.
    if lon < 0:
        lon = lon + 360
    url = cfg['url']

    doy = int(d.strftime('%j'))

    dataset = open_url(url)

    # Nearest-neighbour indices along the time, lon and lat axes.
    dn = (np.abs(doy - dataset['time'][:])).argmin()
    xn = (np.abs(lon - dataset['lon'][:])).argmin()
    yn = (np.abs(lat - dataset['lat'][:])).argmin()

    nz = dataset['depth'].shape[0]
    # Raw regex strings (the previous "\d" escapes are invalid escape
    # sequences in modern Python).
    if re.match(r"temperature\d?$", var):
        mn = ma.masked_values(
            dataset.t_mn.t_mn[dn, :, yn, xn].reshape(nz),
            dataset.t_mn.attributes['_FillValue'])
        sd = ma.masked_values(
            dataset.t_sd.t_sd[dn, :, yn, xn].reshape(nz),
            dataset.t_sd.attributes['_FillValue'])
        # The dataset also exposes t_se (standard error) and t_dd
        # (number of samples) for future use.
    elif re.match(r"salinity\d?$", var):
        mn = ma.masked_values(
            dataset.s_mn.s_mn[dn, :, yn, xn].reshape(nz),
            dataset.s_mn.attributes['_FillValue'])
        sd = ma.masked_values(
            dataset.s_sd.s_sd[dn, :, yn, xn].reshape(nz),
            dataset.s_sd.attributes['_FillValue'])
    else:
        # Previously an unknown var fell through to a NameError on
        # `mn`; fail explicitly instead.
        raise ValueError("Unknown variable: %s" % var)

    zwoa = ma.array(dataset.depth[:])
    # Only interpolate inside the WOA depth range.
    ind = (depth <= zwoa.max()) & (depth >= zwoa.min())

    # Mean value profile
    f = interp1d(zwoa[~ma.getmaskarray(mn)].compressed(), mn.compressed())
    mn_interp = ma.masked_all(depth.shape)
    mn_interp[ind] = f(depth[ind])
    # The stdev profile
    f = interp1d(zwoa[~ma.getmaskarray(sd)].compressed(), sd.compressed())
    sd_interp = ma.masked_all(depth.shape)
    sd_interp[ind] = f(depth[ind])

    output = {'woa_an': mn_interp, 'woa_sd': sd_interp}

    return output
python
def woa_profile_from_dap(var, d, lat, lon, depth, cfg): """ Monthly Climatologic Mean and Standard Deviation from WOA, used either for temperature or salinity. INPUTS time: [day of the year] lat: [-90<lat<90] lon: [-180<lon<180] depth: [meters] Reads the WOA Monthly Climatology NetCDF file and returns the corresponding WOA values of salinity or temperature mean and standard deviation for the given time, lat, lon, depth. """ if lon < 0: lon = lon+360 url = cfg['url'] doy = int(d.strftime('%j')) dataset = open_url(url) dn = (np.abs(doy-dataset['time'][:])).argmin() xn = (np.abs(lon-dataset['lon'][:])).argmin() yn = (np.abs(lat-dataset['lat'][:])).argmin() if re.match("temperature\d?$", var): mn = ma.masked_values(dataset.t_mn.t_mn[dn, :, yn, xn].reshape( dataset['depth'].shape[0]), dataset.t_mn.attributes['_FillValue']) sd = ma.masked_values(dataset.t_sd.t_sd[dn, :, yn, xn].reshape( dataset['depth'].shape[0]), dataset.t_sd.attributes['_FillValue']) # se = ma.masked_values(dataset.t_se.t_se[dn, :, yn, xn].reshape( # dataset['depth'].shape[0]), dataset.t_se.attributes['_FillValue']) # Use this in the future. 
A minimum # of samples # dd = ma.masked_values(dataset.t_dd.t_dd[dn, :, yn, xn].reshape( # dataset['depth'].shape[0]), dataset.t_dd.attributes['_FillValue']) elif re.match("salinity\d?$", var): mn = ma.masked_values(dataset.s_mn.s_mn[dn, :, yn, xn].reshape( dataset['depth'].shape[0]), dataset.s_mn.attributes['_FillValue']) sd = ma.masked_values(dataset.s_sd.s_sd[dn, :, yn, xn].reshape( dataset['depth'].shape[0]), dataset.s_sd.attributes['_FillValue']) # dd = ma.masked_values(dataset.s_dd.s_dd[dn, :, yn, xn].reshape( # dataset['depth'].shape[0]), dataset.s_dd.attributes['_FillValue']) zwoa = ma.array(dataset.depth[:]) ind = (depth <= zwoa.max()) & (depth >= zwoa.min()) # Mean value profile f = interp1d(zwoa[~ma.getmaskarray(mn)].compressed(), mn.compressed()) mn_interp = ma.masked_all(depth.shape) mn_interp[ind] = f(depth[ind]) # The stdev profile f = interp1d(zwoa[~ma.getmaskarray(sd)].compressed(), sd.compressed()) sd_interp = ma.masked_all(depth.shape) sd_interp[ind] = f(depth[ind]) output = {'woa_an': mn_interp, 'woa_sd': sd_interp} return output
[ "def", "woa_profile_from_dap", "(", "var", ",", "d", ",", "lat", ",", "lon", ",", "depth", ",", "cfg", ")", ":", "if", "lon", "<", "0", ":", "lon", "=", "lon", "+", "360", "url", "=", "cfg", "[", "'url'", "]", "doy", "=", "int", "(", "d", "."...
Monthly Climatologic Mean and Standard Deviation from WOA, used either for temperature or salinity. INPUTS time: [day of the year] lat: [-90<lat<90] lon: [-180<lon<180] depth: [meters] Reads the WOA Monthly Climatology NetCDF file and returns the corresponding WOA values of salinity or temperature mean and standard deviation for the given time, lat, lon, depth.
[ "Monthly", "Climatologic", "Mean", "and", "Standard", "Deviation", "from", "WOA", "used", "either", "for", "temperature", "or", "salinity", "." ]
train
https://github.com/castelao/oceansdb/blob/a154c5b845845a602800f9bc53d1702d4cb0f9c5/oceansdb/woa.py#L39-L97