repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput.selected_option | def selected_option(self):
" Return the currently selected option. "
i = 0
for category in self.options:
for o in category.options:
if i == self.selected_option_index:
return o
else:
i += 1 | python | def selected_option(self):
" Return the currently selected option. "
i = 0
for category in self.options:
for o in category.options:
if i == self.selected_option_index:
return o
else:
i += 1 | [
"def",
"selected_option",
"(",
"self",
")",
":",
"i",
"=",
"0",
"for",
"category",
"in",
"self",
".",
"options",
":",
"for",
"o",
"in",
"category",
".",
"options",
":",
"if",
"i",
"==",
"self",
".",
"selected_option_index",
":",
"return",
"o",
"else",
... | Return the currently selected option. | [
"Return",
"the",
"currently",
"selected",
"option",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L290-L298 | train | 205,200 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput.get_compiler_flags | def get_compiler_flags(self):
"""
Give the current compiler flags by looking for _Feature instances
in the globals.
"""
flags = 0
for value in self.get_globals().values():
if isinstance(value, __future__._Feature):
flags |= value.compiler_flag
return flags | python | def get_compiler_flags(self):
"""
Give the current compiler flags by looking for _Feature instances
in the globals.
"""
flags = 0
for value in self.get_globals().values():
if isinstance(value, __future__._Feature):
flags |= value.compiler_flag
return flags | [
"def",
"get_compiler_flags",
"(",
"self",
")",
":",
"flags",
"=",
"0",
"for",
"value",
"in",
"self",
".",
"get_globals",
"(",
")",
".",
"values",
"(",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"__future__",
".",
"_Feature",
")",
":",
"flags",
... | Give the current compiler flags by looking for _Feature instances
in the globals. | [
"Give",
"the",
"current",
"compiler",
"flags",
"by",
"looking",
"for",
"_Feature",
"instances",
"in",
"the",
"globals",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L300-L311 | train | 205,201 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput.install_code_colorscheme | def install_code_colorscheme(self, name, style_dict):
"""
Install a new code color scheme.
"""
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)
self.code_styles[name] = style_dict | python | def install_code_colorscheme(self, name, style_dict):
"""
Install a new code color scheme.
"""
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)
self.code_styles[name] = style_dict | [
"def",
"install_code_colorscheme",
"(",
"self",
",",
"name",
",",
"style_dict",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"six",
".",
"text_type",
")",
"assert",
"isinstance",
"(",
"style_dict",
",",
"dict",
")",
"self",
".",
"code_styles",
"[",
... | Install a new code color scheme. | [
"Install",
"a",
"new",
"code",
"color",
"scheme",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L330-L337 | train | 205,202 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput.install_ui_colorscheme | def install_ui_colorscheme(self, name, style_dict):
"""
Install a new UI color scheme.
"""
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)
self.ui_styles[name] = style_dict | python | def install_ui_colorscheme(self, name, style_dict):
"""
Install a new UI color scheme.
"""
assert isinstance(name, six.text_type)
assert isinstance(style_dict, dict)
self.ui_styles[name] = style_dict | [
"def",
"install_ui_colorscheme",
"(",
"self",
",",
"name",
",",
"style_dict",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"six",
".",
"text_type",
")",
"assert",
"isinstance",
"(",
"style_dict",
",",
"dict",
")",
"self",
".",
"ui_styles",
"[",
"nam... | Install a new UI color scheme. | [
"Install",
"a",
"new",
"UI",
"color",
"scheme",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L348-L355 | train | 205,203 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput._create_application | def _create_application(self):
"""
Create an `Application` instance.
"""
return Application(
input=self.input,
output=self.output,
layout=self.ptpython_layout.layout,
key_bindings=merge_key_bindings([
load_python_bindings(self),
load_auto_suggest_bindings(),
load_sidebar_bindings(self),
load_confirm_exit_bindings(self),
ConditionalKeyBindings(
load_open_in_editor_bindings(),
Condition(lambda: self.enable_open_in_editor)),
# Extra key bindings should not be active when the sidebar is visible.
ConditionalKeyBindings(
self.extra_key_bindings,
Condition(lambda: not self.show_sidebar))
]),
color_depth=lambda: self.color_depth,
paste_mode=Condition(lambda: self.paste_mode),
mouse_support=Condition(lambda: self.enable_mouse_support),
style=DynamicStyle(lambda: self._current_style),
style_transformation=self.style_transformation,
include_default_pygments_style=False,
reverse_vi_search_direction=True) | python | def _create_application(self):
"""
Create an `Application` instance.
"""
return Application(
input=self.input,
output=self.output,
layout=self.ptpython_layout.layout,
key_bindings=merge_key_bindings([
load_python_bindings(self),
load_auto_suggest_bindings(),
load_sidebar_bindings(self),
load_confirm_exit_bindings(self),
ConditionalKeyBindings(
load_open_in_editor_bindings(),
Condition(lambda: self.enable_open_in_editor)),
# Extra key bindings should not be active when the sidebar is visible.
ConditionalKeyBindings(
self.extra_key_bindings,
Condition(lambda: not self.show_sidebar))
]),
color_depth=lambda: self.color_depth,
paste_mode=Condition(lambda: self.paste_mode),
mouse_support=Condition(lambda: self.enable_mouse_support),
style=DynamicStyle(lambda: self._current_style),
style_transformation=self.style_transformation,
include_default_pygments_style=False,
reverse_vi_search_direction=True) | [
"def",
"_create_application",
"(",
"self",
")",
":",
"return",
"Application",
"(",
"input",
"=",
"self",
".",
"input",
",",
"output",
"=",
"self",
".",
"output",
",",
"layout",
"=",
"self",
".",
"ptpython_layout",
".",
"layout",
",",
"key_bindings",
"=",
... | Create an `Application` instance. | [
"Create",
"an",
"Application",
"instance",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L555-L582 | train | 205,204 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput._create_buffer | def _create_buffer(self):
"""
Create the `Buffer` for the Python input.
"""
python_buffer = Buffer(
name=DEFAULT_BUFFER,
complete_while_typing=Condition(lambda: self.complete_while_typing),
enable_history_search=Condition(lambda: self.enable_history_search),
tempfile_suffix='.py',
history=self.history,
completer=ThreadedCompleter(self._completer),
validator=ConditionalValidator(
self._validator,
Condition(lambda: self.enable_input_validation)),
auto_suggest=ConditionalAutoSuggest(
ThreadedAutoSuggest(AutoSuggestFromHistory()),
Condition(lambda: self.enable_auto_suggest)),
accept_handler=self._accept_handler,
on_text_changed=self._on_input_timeout)
return python_buffer | python | def _create_buffer(self):
"""
Create the `Buffer` for the Python input.
"""
python_buffer = Buffer(
name=DEFAULT_BUFFER,
complete_while_typing=Condition(lambda: self.complete_while_typing),
enable_history_search=Condition(lambda: self.enable_history_search),
tempfile_suffix='.py',
history=self.history,
completer=ThreadedCompleter(self._completer),
validator=ConditionalValidator(
self._validator,
Condition(lambda: self.enable_input_validation)),
auto_suggest=ConditionalAutoSuggest(
ThreadedAutoSuggest(AutoSuggestFromHistory()),
Condition(lambda: self.enable_auto_suggest)),
accept_handler=self._accept_handler,
on_text_changed=self._on_input_timeout)
return python_buffer | [
"def",
"_create_buffer",
"(",
"self",
")",
":",
"python_buffer",
"=",
"Buffer",
"(",
"name",
"=",
"DEFAULT_BUFFER",
",",
"complete_while_typing",
"=",
"Condition",
"(",
"lambda",
":",
"self",
".",
"complete_while_typing",
")",
",",
"enable_history_search",
"=",
... | Create the `Buffer` for the Python input. | [
"Create",
"the",
"Buffer",
"for",
"the",
"Python",
"input",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L584-L604 | train | 205,205 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput._on_input_timeout | def _on_input_timeout(self, buff):
"""
When there is no input activity,
in another thread, get the signature of the current code.
"""
assert isinstance(buff, Buffer)
app = self.app
# Never run multiple get-signature threads.
if self._get_signatures_thread_running:
return
self._get_signatures_thread_running = True
document = buff.document
def run():
script = get_jedi_script_from_document(document, self.get_locals(), self.get_globals())
# Show signatures in help text.
if script:
try:
signatures = script.call_signatures()
except ValueError:
# e.g. in case of an invalid \\x escape.
signatures = []
except Exception:
# Sometimes we still get an exception (TypeError), because
# of probably bugs in jedi. We can silence them.
# See: https://github.com/davidhalter/jedi/issues/492
signatures = []
else:
# Try to access the params attribute just once. For Jedi
# signatures containing the keyword-only argument star,
# this will crash when retrieving it the first time with
# AttributeError. Every following time it works.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
try:
if signatures:
signatures[0].params
except AttributeError:
pass
else:
signatures = []
self._get_signatures_thread_running = False
# Set signatures and redraw if the text didn't change in the
# meantime. Otherwise request new signatures.
if buff.text == document.text:
self.signatures = signatures
# Set docstring in docstring buffer.
if signatures:
string = signatures[0].docstring()
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
self.docstring_buffer.reset(
document=Document(string, cursor_position=0))
else:
self.docstring_buffer.reset()
app.invalidate()
else:
self._on_input_timeout(buff)
get_event_loop().run_in_executor(run) | python | def _on_input_timeout(self, buff):
"""
When there is no input activity,
in another thread, get the signature of the current code.
"""
assert isinstance(buff, Buffer)
app = self.app
# Never run multiple get-signature threads.
if self._get_signatures_thread_running:
return
self._get_signatures_thread_running = True
document = buff.document
def run():
script = get_jedi_script_from_document(document, self.get_locals(), self.get_globals())
# Show signatures in help text.
if script:
try:
signatures = script.call_signatures()
except ValueError:
# e.g. in case of an invalid \\x escape.
signatures = []
except Exception:
# Sometimes we still get an exception (TypeError), because
# of probably bugs in jedi. We can silence them.
# See: https://github.com/davidhalter/jedi/issues/492
signatures = []
else:
# Try to access the params attribute just once. For Jedi
# signatures containing the keyword-only argument star,
# this will crash when retrieving it the first time with
# AttributeError. Every following time it works.
# See: https://github.com/jonathanslenders/ptpython/issues/47
# https://github.com/davidhalter/jedi/issues/598
try:
if signatures:
signatures[0].params
except AttributeError:
pass
else:
signatures = []
self._get_signatures_thread_running = False
# Set signatures and redraw if the text didn't change in the
# meantime. Otherwise request new signatures.
if buff.text == document.text:
self.signatures = signatures
# Set docstring in docstring buffer.
if signatures:
string = signatures[0].docstring()
if not isinstance(string, six.text_type):
string = string.decode('utf-8')
self.docstring_buffer.reset(
document=Document(string, cursor_position=0))
else:
self.docstring_buffer.reset()
app.invalidate()
else:
self._on_input_timeout(buff)
get_event_loop().run_in_executor(run) | [
"def",
"_on_input_timeout",
"(",
"self",
",",
"buff",
")",
":",
"assert",
"isinstance",
"(",
"buff",
",",
"Buffer",
")",
"app",
"=",
"self",
".",
"app",
"# Never run multiple get-signature threads.",
"if",
"self",
".",
"_get_signatures_thread_running",
":",
"retur... | When there is no input activity,
in another thread, get the signature of the current code. | [
"When",
"there",
"is",
"no",
"input",
"activity",
"in",
"another",
"thread",
"get",
"the",
"signature",
"of",
"the",
"current",
"code",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L625-L691 | train | 205,206 |
prompt-toolkit/ptpython | ptpython/python_input.py | PythonInput.enter_history | def enter_history(self):
"""
Display the history.
"""
app = get_app()
app.vi_state.input_mode = InputMode.NAVIGATION
def done(f):
result = f.result()
if result is not None:
self.default_buffer.text = result
app.vi_state.input_mode = InputMode.INSERT
history = History(self, self.default_buffer.document)
future = run_coroutine_in_terminal(history.app.run_async)
future.add_done_callback(done) | python | def enter_history(self):
"""
Display the history.
"""
app = get_app()
app.vi_state.input_mode = InputMode.NAVIGATION
def done(f):
result = f.result()
if result is not None:
self.default_buffer.text = result
app.vi_state.input_mode = InputMode.INSERT
history = History(self, self.default_buffer.document)
future = run_coroutine_in_terminal(history.app.run_async)
future.add_done_callback(done) | [
"def",
"enter_history",
"(",
"self",
")",
":",
"app",
"=",
"get_app",
"(",
")",
"app",
".",
"vi_state",
".",
"input_mode",
"=",
"InputMode",
".",
"NAVIGATION",
"def",
"done",
"(",
"f",
")",
":",
"result",
"=",
"f",
".",
"result",
"(",
")",
"if",
"r... | Display the history. | [
"Display",
"the",
"history",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L696-L713 | train | 205,207 |
prompt-toolkit/ptpython | ptpython/style.py | get_all_code_styles | def get_all_code_styles():
"""
Return a mapping from style names to their classes.
"""
result = dict((name, style_from_pygments_cls(get_style_by_name(name))) for name in get_all_styles())
result['win32'] = Style.from_dict(win32_code_style)
return result | python | def get_all_code_styles():
"""
Return a mapping from style names to their classes.
"""
result = dict((name, style_from_pygments_cls(get_style_by_name(name))) for name in get_all_styles())
result['win32'] = Style.from_dict(win32_code_style)
return result | [
"def",
"get_all_code_styles",
"(",
")",
":",
"result",
"=",
"dict",
"(",
"(",
"name",
",",
"style_from_pygments_cls",
"(",
"get_style_by_name",
"(",
"name",
")",
")",
")",
"for",
"name",
"in",
"get_all_styles",
"(",
")",
")",
"result",
"[",
"'win32'",
"]",... | Return a mapping from style names to their classes. | [
"Return",
"a",
"mapping",
"from",
"style",
"names",
"to",
"their",
"classes",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/style.py#L15-L21 | train | 205,208 |
prompt-toolkit/ptpython | ptpython/ipython.py | initialize_extensions | def initialize_extensions(shell, extensions):
"""
Partial copy of `InteractiveShellApp.init_extensions` from IPython.
"""
try:
iter(extensions)
except TypeError:
pass # no extensions found
else:
for ext in extensions:
try:
shell.extension_manager.load_extension(ext)
except:
ipy_utils.warn.warn(
"Error in loading extension: %s" % ext +
"\nCheck your config files in %s" % ipy_utils.path.get_ipython_dir())
shell.showtraceback() | python | def initialize_extensions(shell, extensions):
"""
Partial copy of `InteractiveShellApp.init_extensions` from IPython.
"""
try:
iter(extensions)
except TypeError:
pass # no extensions found
else:
for ext in extensions:
try:
shell.extension_manager.load_extension(ext)
except:
ipy_utils.warn.warn(
"Error in loading extension: %s" % ext +
"\nCheck your config files in %s" % ipy_utils.path.get_ipython_dir())
shell.showtraceback() | [
"def",
"initialize_extensions",
"(",
"shell",
",",
"extensions",
")",
":",
"try",
":",
"iter",
"(",
"extensions",
")",
"except",
"TypeError",
":",
"pass",
"# no extensions found",
"else",
":",
"for",
"ext",
"in",
"extensions",
":",
"try",
":",
"shell",
".",
... | Partial copy of `InteractiveShellApp.init_extensions` from IPython. | [
"Partial",
"copy",
"of",
"InteractiveShellApp",
".",
"init_extensions",
"from",
"IPython",
"."
] | b1bba26a491324cd65e0ef46c7b818c4b88fd993 | https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/ipython.py#L227-L243 | train | 205,209 |
guma44/GEOparse | GEOparse/sra_downloader.py | SRADownloader.paths_for_download | def paths_for_download(self):
"""List of URLs available for downloading."""
if self._paths_for_download is None:
queries = list()
try:
for sra in self.gsm.relations['SRA']:
query = sra.split("=")[-1]
if 'SRX' not in query:
raise ValueError(
"Sample looks like it is not an SRA: %s" % query)
logger.info("Query: %s" % query)
queries.append(query)
except KeyError:
raise NoSRARelationException(
'No relation called SRA for %s' % self.gsm.get_accession())
# Construction of DataFrame df with paths to download
df = DataFrame(columns=['download_path'])
for query in queries:
# retrieve IDs for given SRX
searchdata = Entrez.esearch(db='sra', term=query, usehistory='y',
retmode='json')
answer = json.loads(searchdata.read())
ids = answer["esearchresult"]["idlist"]
if len(ids) != 1:
raise ValueError(
"There should be one and only one ID per SRX")
# using ID fetch the info
number_of_trials = 10
wait_time = 30
for trial in range(number_of_trials):
try:
results = Entrez.efetch(db="sra", id=ids[0],
rettype="runinfo",
retmode="text").read()
break
except HTTPError as httperr:
if "502" in str(httperr):
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
wait_time))
time.sleep(wait_time)
elif httperr.code == 429:
# This means that there is too many requests
try:
header_wait_time = int(
httperr.headers["Retry-After"])
except:
header_wait_time = wait_time
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
header_wait_time))
time.sleep(header_wait_time)
else:
raise httperr
try:
df_tmp = DataFrame([i.split(',') for i in results.split('\n') if i != ''][1:],
columns=[i.split(',') for i in results.split('\n') if i != ''][0])
except IndexError:
logger.error(("SRA is empty (ID: %s, query: %s). "
"Check if it is publicly available.") %
(ids[0], query))
continue
# check it first
try:
df_tmp['download_path']
except KeyError as e:
logger.error('KeyError: ' + str(e) + '\n')
logger.error(str(results) + '\n')
df = concat([df, df_tmp], sort=True)
self._paths_for_download = [path for path in df['download_path']]
return self._paths_for_download | python | def paths_for_download(self):
"""List of URLs available for downloading."""
if self._paths_for_download is None:
queries = list()
try:
for sra in self.gsm.relations['SRA']:
query = sra.split("=")[-1]
if 'SRX' not in query:
raise ValueError(
"Sample looks like it is not an SRA: %s" % query)
logger.info("Query: %s" % query)
queries.append(query)
except KeyError:
raise NoSRARelationException(
'No relation called SRA for %s' % self.gsm.get_accession())
# Construction of DataFrame df with paths to download
df = DataFrame(columns=['download_path'])
for query in queries:
# retrieve IDs for given SRX
searchdata = Entrez.esearch(db='sra', term=query, usehistory='y',
retmode='json')
answer = json.loads(searchdata.read())
ids = answer["esearchresult"]["idlist"]
if len(ids) != 1:
raise ValueError(
"There should be one and only one ID per SRX")
# using ID fetch the info
number_of_trials = 10
wait_time = 30
for trial in range(number_of_trials):
try:
results = Entrez.efetch(db="sra", id=ids[0],
rettype="runinfo",
retmode="text").read()
break
except HTTPError as httperr:
if "502" in str(httperr):
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
wait_time))
time.sleep(wait_time)
elif httperr.code == 429:
# This means that there is too many requests
try:
header_wait_time = int(
httperr.headers["Retry-After"])
except:
header_wait_time = wait_time
logger.warn(("%s, trial %i out of %i, waiting "
"for %i seconds.") % (
str(httperr),
trial,
number_of_trials,
header_wait_time))
time.sleep(header_wait_time)
else:
raise httperr
try:
df_tmp = DataFrame([i.split(',') for i in results.split('\n') if i != ''][1:],
columns=[i.split(',') for i in results.split('\n') if i != ''][0])
except IndexError:
logger.error(("SRA is empty (ID: %s, query: %s). "
"Check if it is publicly available.") %
(ids[0], query))
continue
# check it first
try:
df_tmp['download_path']
except KeyError as e:
logger.error('KeyError: ' + str(e) + '\n')
logger.error(str(results) + '\n')
df = concat([df, df_tmp], sort=True)
self._paths_for_download = [path for path in df['download_path']]
return self._paths_for_download | [
"def",
"paths_for_download",
"(",
"self",
")",
":",
"if",
"self",
".",
"_paths_for_download",
"is",
"None",
":",
"queries",
"=",
"list",
"(",
")",
"try",
":",
"for",
"sra",
"in",
"self",
".",
"gsm",
".",
"relations",
"[",
"'SRA'",
"]",
":",
"query",
... | List of URLs available for downloading. | [
"List",
"of",
"URLs",
"available",
"for",
"downloading",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/sra_downloader.py#L129-L209 | train | 205,210 |
guma44/GEOparse | GEOparse/sra_downloader.py | SRADownloader.download | def download(self):
"""Download SRA files.
Returns:
:obj:`list` of :obj:`str`: List of downloaded files.
"""
self.downloaded_paths = list()
for path in self.paths_for_download:
downloaded_path = list()
utils.mkdir_p(os.path.abspath(self.directory))
sra_run = path.split("/")[-1]
logger.info("Analysing %s" % sra_run)
url = type(self).FTP_ADDRESS_TPL.format(
range_subdir=sra_run[:6],
file_dir=sra_run)
logger.debug("URL: %s", url)
filepath = os.path.abspath(
os.path.join(self.directory, "%s.sra" % sra_run))
utils.download_from_url(
url,
filepath,
aspera=self.aspera,
silent=self.silent,
force=self.force)
if self.filetype in ("fasta", "fastq"):
if utils.which('fastq-dump') is None:
logger.error("fastq-dump command not found")
ftype = ""
if self.filetype == "fasta":
ftype = " --fasta "
cmd = "fastq-dump"
if utils.which('parallel-fastq-dump') is None:
cmd += " %s --outdir %s %s"
else:
logger.debug("Using parallel fastq-dump")
cmd = " parallel-fastq-dump --threads %s"
cmd = cmd % self.threads
cmd += " %s --outdir %s -s %s"
cmd = cmd % (ftype, self.directory, filepath)
for fqoption, fqvalue in iteritems(self.fastq_dump_options):
if fqvalue:
cmd += (" --%s %s" % (fqoption, fqvalue))
elif fqvalue is None:
cmd += (" --%s" % fqoption)
logger.debug(cmd)
process = sp.Popen(cmd, stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
logger.info("Converting to %s/%s*.%s.gz\n" % (
self.directory, sra_run, self.filetype))
pout, perr = process.communicate()
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*.%s.gz" % (sra_run, self.filetype)))
elif self.filetype == 'sra':
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*.%s" % (sra_run, self.filetype)))
else:
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*" % sra_run))
logger.error("Filetype %s not supported." % self.filetype)
if not self.keep_sra and self.filetype != 'sra':
# Delete sra file
os.unlink(filepath)
self.downloaded_paths += downloaded_path
return self.downloaded_paths | python | def download(self):
"""Download SRA files.
Returns:
:obj:`list` of :obj:`str`: List of downloaded files.
"""
self.downloaded_paths = list()
for path in self.paths_for_download:
downloaded_path = list()
utils.mkdir_p(os.path.abspath(self.directory))
sra_run = path.split("/")[-1]
logger.info("Analysing %s" % sra_run)
url = type(self).FTP_ADDRESS_TPL.format(
range_subdir=sra_run[:6],
file_dir=sra_run)
logger.debug("URL: %s", url)
filepath = os.path.abspath(
os.path.join(self.directory, "%s.sra" % sra_run))
utils.download_from_url(
url,
filepath,
aspera=self.aspera,
silent=self.silent,
force=self.force)
if self.filetype in ("fasta", "fastq"):
if utils.which('fastq-dump') is None:
logger.error("fastq-dump command not found")
ftype = ""
if self.filetype == "fasta":
ftype = " --fasta "
cmd = "fastq-dump"
if utils.which('parallel-fastq-dump') is None:
cmd += " %s --outdir %s %s"
else:
logger.debug("Using parallel fastq-dump")
cmd = " parallel-fastq-dump --threads %s"
cmd = cmd % self.threads
cmd += " %s --outdir %s -s %s"
cmd = cmd % (ftype, self.directory, filepath)
for fqoption, fqvalue in iteritems(self.fastq_dump_options):
if fqvalue:
cmd += (" --%s %s" % (fqoption, fqvalue))
elif fqvalue is None:
cmd += (" --%s" % fqoption)
logger.debug(cmd)
process = sp.Popen(cmd, stdout=sp.PIPE,
stderr=sp.PIPE,
shell=True)
logger.info("Converting to %s/%s*.%s.gz\n" % (
self.directory, sra_run, self.filetype))
pout, perr = process.communicate()
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*.%s.gz" % (sra_run, self.filetype)))
elif self.filetype == 'sra':
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*.%s" % (sra_run, self.filetype)))
else:
downloaded_path = glob.glob(os.path.join(
self.directory,
"%s*" % sra_run))
logger.error("Filetype %s not supported." % self.filetype)
if not self.keep_sra and self.filetype != 'sra':
# Delete sra file
os.unlink(filepath)
self.downloaded_paths += downloaded_path
return self.downloaded_paths | [
"def",
"download",
"(",
"self",
")",
":",
"self",
".",
"downloaded_paths",
"=",
"list",
"(",
")",
"for",
"path",
"in",
"self",
".",
"paths_for_download",
":",
"downloaded_path",
"=",
"list",
"(",
")",
"utils",
".",
"mkdir_p",
"(",
"os",
".",
"path",
".... | Download SRA files.
Returns:
:obj:`list` of :obj:`str`: List of downloaded files. | [
"Download",
"SRA",
"files",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/sra_downloader.py#L211-L285 | train | 205,211 |
guma44/GEOparse | GEOparse/logger.py | add_log_file | def add_log_file(path):
"""Add log file.
Args:
path (:obj:`str`): Path to the log file.
"""
logfile_handler = RotatingFileHandler(
path, maxBytes=50000, backupCount=2)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
datefmt="%d-%b-%Y %H:%M:%S")
logfile_handler.setFormatter(formatter)
geoparse_logger.addHandler(logfile_handler) | python | def add_log_file(path):
"""Add log file.
Args:
path (:obj:`str`): Path to the log file.
"""
logfile_handler = RotatingFileHandler(
path, maxBytes=50000, backupCount=2)
formatter = logging.Formatter(
fmt='%(asctime)s %(levelname)s %(module)s - %(message)s',
datefmt="%d-%b-%Y %H:%M:%S")
logfile_handler.setFormatter(formatter)
geoparse_logger.addHandler(logfile_handler) | [
"def",
"add_log_file",
"(",
"path",
")",
":",
"logfile_handler",
"=",
"RotatingFileHandler",
"(",
"path",
",",
"maxBytes",
"=",
"50000",
",",
"backupCount",
"=",
"2",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"fmt",
"=",
"'%(asctime)s %(levelnam... | Add log file.
Args:
path (:obj:`str`): Path to the log file. | [
"Add",
"log",
"file",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/logger.py#L27-L39 | train | 205,212 |
guma44/GEOparse | GEOparse/GEOTypes.py | _sra_download_worker | def _sra_download_worker(*args):
"""A worker to download SRA files.
To be used with multiprocessing.
"""
gsm = args[0][0]
email = args[0][1]
dirpath = args[0][2]
kwargs = args[0][3]
return (gsm.get_accession(), gsm.download_SRA(email, dirpath, **kwargs)) | python | def _sra_download_worker(*args):
"""A worker to download SRA files.
To be used with multiprocessing.
"""
gsm = args[0][0]
email = args[0][1]
dirpath = args[0][2]
kwargs = args[0][3]
return (gsm.get_accession(), gsm.download_SRA(email, dirpath, **kwargs)) | [
"def",
"_sra_download_worker",
"(",
"*",
"args",
")",
":",
"gsm",
"=",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"email",
"=",
"args",
"[",
"0",
"]",
"[",
"1",
"]",
"dirpath",
"=",
"args",
"[",
"0",
"]",
"[",
"2",
"]",
"kwargs",
"=",
"args",
"[",... | A worker to download SRA files.
To be used with multiprocessing. | [
"A",
"worker",
"to",
"download",
"SRA",
"files",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L28-L37 | train | 205,213 |
guma44/GEOparse | GEOparse/GEOTypes.py | _supplementary_files_download_worker | def _supplementary_files_download_worker(*args):
"""A worker to download supplementary files.
To be used with multiprocessing.
"""
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]
return (gsm.get_accession(), gsm.download_supplementary_files(
directory=dirpath,
download_sra=download_sra,
email=email, **sra_kwargs)) | python | def _supplementary_files_download_worker(*args):
"""A worker to download supplementary files.
To be used with multiprocessing.
"""
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]
return (gsm.get_accession(), gsm.download_supplementary_files(
directory=dirpath,
download_sra=download_sra,
email=email, **sra_kwargs)) | [
"def",
"_supplementary_files_download_worker",
"(",
"*",
"args",
")",
":",
"gsm",
"=",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
"download_sra",
"=",
"args",
"[",
"0",
"]",
"[",
"1",
"]",
"email",
"=",
"args",
"[",
"0",
"]",
"[",
"2",
"]",
"dirpath",
... | A worker to download supplementary files.
To be used with multiprocessing. | [
"A",
"worker",
"to",
"download",
"supplementary",
"files",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L40-L53 | train | 205,214 |
guma44/GEOparse | GEOparse/GEOTypes.py | BaseGEO.get_metadata_attribute | def get_metadata_attribute(self, metaname):
"""Get the metadata attribute by the name.
Args:
metaname (:obj:`str`): Name of the attribute
Returns:
:obj:`list` or :obj:`str`: Value(s) of the requested metadata
attribute
Raises:
NoMetadataException: Attribute error
TypeError: Metadata should be a list
"""
metadata_value = self.metadata.get(metaname, None)
if metadata_value is None:
raise NoMetadataException(
"No metadata attribute named %s" % metaname)
if not isinstance(metadata_value, list):
raise TypeError("Metadata is not a list and it should be.")
if len(metadata_value) > 1:
return metadata_value
else:
return metadata_value[0] | python | def get_metadata_attribute(self, metaname):
"""Get the metadata attribute by the name.
Args:
metaname (:obj:`str`): Name of the attribute
Returns:
:obj:`list` or :obj:`str`: Value(s) of the requested metadata
attribute
Raises:
NoMetadataException: Attribute error
TypeError: Metadata should be a list
"""
metadata_value = self.metadata.get(metaname, None)
if metadata_value is None:
raise NoMetadataException(
"No metadata attribute named %s" % metaname)
if not isinstance(metadata_value, list):
raise TypeError("Metadata is not a list and it should be.")
if len(metadata_value) > 1:
return metadata_value
else:
return metadata_value[0] | [
"def",
"get_metadata_attribute",
"(",
"self",
",",
"metaname",
")",
":",
"metadata_value",
"=",
"self",
".",
"metadata",
".",
"get",
"(",
"metaname",
",",
"None",
")",
"if",
"metadata_value",
"is",
"None",
":",
"raise",
"NoMetadataException",
"(",
"\"No metada... | Get the metadata attribute by the name.
Args:
metaname (:obj:`str`): Name of the attribute
Returns:
:obj:`list` or :obj:`str`: Value(s) of the requested metadata
attribute
Raises:
NoMetadataException: Attribute error
TypeError: Metadata should be a list | [
"Get",
"the",
"metadata",
"attribute",
"by",
"the",
"name",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L98-L122 | train | 205,215 |
guma44/GEOparse | GEOparse/GEOTypes.py | BaseGEO._get_metadata_as_string | def _get_metadata_as_string(self):
"""Get the metadata as SOFT formatted string."""
metalist = []
for metaname, meta in iteritems(self.metadata):
message = "Single value in metadata dictionary should be a list!"
assert isinstance(meta, list), message
for data in meta:
if data:
metalist.append("!%s_%s = %s" % (self.geotype.capitalize(),
metaname, data))
return "\n".join(metalist) | python | def _get_metadata_as_string(self):
"""Get the metadata as SOFT formatted string."""
metalist = []
for metaname, meta in iteritems(self.metadata):
message = "Single value in metadata dictionary should be a list!"
assert isinstance(meta, list), message
for data in meta:
if data:
metalist.append("!%s_%s = %s" % (self.geotype.capitalize(),
metaname, data))
return "\n".join(metalist) | [
"def",
"_get_metadata_as_string",
"(",
"self",
")",
":",
"metalist",
"=",
"[",
"]",
"for",
"metaname",
",",
"meta",
"in",
"iteritems",
"(",
"self",
".",
"metadata",
")",
":",
"message",
"=",
"\"Single value in metadata dictionary should be a list!\"",
"assert",
"i... | Get the metadata as SOFT formatted string. | [
"Get",
"the",
"metadata",
"as",
"SOFT",
"formatted",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L143-L153 | train | 205,216 |
guma44/GEOparse | GEOparse/GEOTypes.py | BaseGEO.to_soft | def to_soft(self, path_or_handle, as_gzip=False):
"""Save the object in a SOFT format.
Args:
path_or_handle (:obj:`str` or :obj:`file`): Path or handle to
output file
as_gzip (:obj:`bool`): Save as gzip
"""
if isinstance(path_or_handle, str):
if as_gzip:
with gzip.open(path_or_handle, 'wt') as outfile:
outfile.write(self._get_object_as_soft())
else:
with open(path_or_handle, 'w') as outfile:
outfile.write(self._get_object_as_soft())
else:
path_or_handle.write(self._get_object_as_soft()) | python | def to_soft(self, path_or_handle, as_gzip=False):
"""Save the object in a SOFT format.
Args:
path_or_handle (:obj:`str` or :obj:`file`): Path or handle to
output file
as_gzip (:obj:`bool`): Save as gzip
"""
if isinstance(path_or_handle, str):
if as_gzip:
with gzip.open(path_or_handle, 'wt') as outfile:
outfile.write(self._get_object_as_soft())
else:
with open(path_or_handle, 'w') as outfile:
outfile.write(self._get_object_as_soft())
else:
path_or_handle.write(self._get_object_as_soft()) | [
"def",
"to_soft",
"(",
"self",
",",
"path_or_handle",
",",
"as_gzip",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"path_or_handle",
",",
"str",
")",
":",
"if",
"as_gzip",
":",
"with",
"gzip",
".",
"open",
"(",
"path_or_handle",
",",
"'wt'",
")",
"... | Save the object in a SOFT format.
Args:
path_or_handle (:obj:`str` or :obj:`file`): Path or handle to
output file
as_gzip (:obj:`bool`): Save as gzip | [
"Save",
"the",
"object",
"in",
"a",
"SOFT",
"format",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L159-L175 | train | 205,217 |
guma44/GEOparse | GEOparse/GEOTypes.py | SimpleGEO.head | def head(self):
"""Print short description of the object."""
summary = list()
summary.append("%s %s" % (self.geotype, self.name) + "\n")
summary.append(" - Metadata:" + "\n")
summary.append(
"\n".join(self._get_metadata_as_string().split("\n")[:5]) + "\n")
summary.append("\n")
summary.append(" - Columns:" + "\n")
summary.append(self.columns.to_string() + "\n")
summary.append("\n")
summary.append(" - Table:" + "\n")
summary.append(
"\t".join(["Index"] + self.table.columns.tolist()) + "\n")
summary.append(self.table.head().to_string(header=None) + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(self.table.tail().to_string(header=None) + "\n")
return "\n".join([str(s) for s in summary]) | python | def head(self):
"""Print short description of the object."""
summary = list()
summary.append("%s %s" % (self.geotype, self.name) + "\n")
summary.append(" - Metadata:" + "\n")
summary.append(
"\n".join(self._get_metadata_as_string().split("\n")[:5]) + "\n")
summary.append("\n")
summary.append(" - Columns:" + "\n")
summary.append(self.columns.to_string() + "\n")
summary.append("\n")
summary.append(" - Table:" + "\n")
summary.append(
"\t".join(["Index"] + self.table.columns.tolist()) + "\n")
summary.append(self.table.head().to_string(header=None) + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(" " * 40 + "..." + " " * 40 + "\n")
summary.append(self.table.tail().to_string(header=None) + "\n")
return "\n".join([str(s) for s in summary]) | [
"def",
"head",
"(",
"self",
")",
":",
"summary",
"=",
"list",
"(",
")",
"summary",
".",
"append",
"(",
"\"%s %s\"",
"%",
"(",
"self",
".",
"geotype",
",",
"self",
".",
"name",
")",
"+",
"\"\\n\"",
")",
"summary",
".",
"append",
"(",
"\" - Metadata:\"... | Print short description of the object. | [
"Print",
"short",
"description",
"of",
"the",
"object",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L267-L286 | train | 205,218 |
guma44/GEOparse | GEOparse/GEOTypes.py | SimpleGEO._get_object_as_soft | def _get_object_as_soft(self):
"""Get the object as SOFT formated string."""
soft = ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string(),
self._get_columns_as_string(),
self._get_table_as_string()]
return "\n".join(soft) | python | def _get_object_as_soft(self):
"""Get the object as SOFT formated string."""
soft = ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string(),
self._get_columns_as_string(),
self._get_table_as_string()]
return "\n".join(soft) | [
"def",
"_get_object_as_soft",
"(",
"self",
")",
":",
"soft",
"=",
"[",
"\"^%s = %s\"",
"%",
"(",
"self",
".",
"geotype",
",",
"self",
".",
"name",
")",
",",
"self",
".",
"_get_metadata_as_string",
"(",
")",
",",
"self",
".",
"_get_columns_as_string",
"(",
... | Get the object as SOFT formated string. | [
"Get",
"the",
"object",
"as",
"SOFT",
"formated",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L300-L306 | train | 205,219 |
guma44/GEOparse | GEOparse/GEOTypes.py | SimpleGEO._get_table_as_string | def _get_table_as_string(self):
"""Get table as SOFT formated string."""
tablelist = []
tablelist.append("!%s_table_begin" % self.geotype.lower())
tablelist.append("\t".join(self.table.columns))
for idx, row in self.table.iterrows():
tablelist.append("\t".join(map(str, row)))
tablelist.append("!%s_table_end" % self.geotype.lower())
return "\n".join(tablelist) | python | def _get_table_as_string(self):
"""Get table as SOFT formated string."""
tablelist = []
tablelist.append("!%s_table_begin" % self.geotype.lower())
tablelist.append("\t".join(self.table.columns))
for idx, row in self.table.iterrows():
tablelist.append("\t".join(map(str, row)))
tablelist.append("!%s_table_end" % self.geotype.lower())
return "\n".join(tablelist) | [
"def",
"_get_table_as_string",
"(",
"self",
")",
":",
"tablelist",
"=",
"[",
"]",
"tablelist",
".",
"append",
"(",
"\"!%s_table_begin\"",
"%",
"self",
".",
"geotype",
".",
"lower",
"(",
")",
")",
"tablelist",
".",
"append",
"(",
"\"\\t\"",
".",
"join",
"... | Get table as SOFT formated string. | [
"Get",
"table",
"as",
"SOFT",
"formated",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L308-L316 | train | 205,220 |
guma44/GEOparse | GEOparse/GEOTypes.py | SimpleGEO._get_columns_as_string | def _get_columns_as_string(self):
"""Returns columns as SOFT formated string."""
columnslist = []
for rowidx, row in self.columns.iterrows():
columnslist.append("#%s = %s" % (rowidx, row.description))
return "\n".join(columnslist) | python | def _get_columns_as_string(self):
"""Returns columns as SOFT formated string."""
columnslist = []
for rowidx, row in self.columns.iterrows():
columnslist.append("#%s = %s" % (rowidx, row.description))
return "\n".join(columnslist) | [
"def",
"_get_columns_as_string",
"(",
"self",
")",
":",
"columnslist",
"=",
"[",
"]",
"for",
"rowidx",
",",
"row",
"in",
"self",
".",
"columns",
".",
"iterrows",
"(",
")",
":",
"columnslist",
".",
"append",
"(",
"\"#%s = %s\"",
"%",
"(",
"rowidx",
",",
... | Returns columns as SOFT formated string. | [
"Returns",
"columns",
"as",
"SOFT",
"formated",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L318-L323 | train | 205,221 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSM.annotate | def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF",
in_place=False):
"""Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
# annotate by merging
annotated = self.table.merge(
annotation_table[[gpl_on, annotation_column]], left_on=gsm_on,
right_on=gpl_on)
del annotated[gpl_on]
if in_place:
self.table = annotated
return None
else:
return annotated | python | def annotate(self, gpl, annotation_column, gpl_on="ID", gsm_on="ID_REF",
in_place=False):
"""Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
# annotate by merging
annotated = self.table.merge(
annotation_table[[gpl_on, annotation_column]], left_on=gsm_on,
right_on=gpl_on)
del annotated[gpl_on]
if in_place:
self.table = annotated
return None
else:
return annotated | [
"def",
"annotate",
"(",
"self",
",",
"gpl",
",",
"annotation_column",
",",
"gpl_on",
"=",
"\"ID\"",
",",
"gsm_on",
"=",
"\"ID_REF\"",
",",
"in_place",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"gpl",
",",
"GPL",
")",
":",
"annotation_table",
"=",
... | Annotate GSM with provided GPL
Args:
gpl (:obj:`pandas.DataFrame`): A Platform or DataFrame to annotate with
annotation_column (str`): Column in a table for annotation
gpl_on (:obj:`str`): Use this column in GSM to merge. Defaults to "ID".
gsm_on (:obj:`str`): Use this column in GPL to merge.
Defaults to "ID_REF".
in_place (:obj:`bool`): Substitute table in GSM by new annotated
table. Defaults to False.
Returns:
:obj:`pandas.DataFrame` or :obj:`None`: Annotated table or None
Raises:
TypeError: GPL should be GPL or pandas.DataFrame | [
"Annotate",
"GSM",
"with",
"provided",
"GPL"
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L331-L367 | train | 205,222 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSM.annotate_and_average | def annotate_and_average(self, gpl, expression_column, group_by_column,
rename=True, force=False, merge_on_column=None,
gsm_on=None, gpl_on=None):
"""Annotate GSM table with provided GPL.
Args:
gpl (:obj:`GEOTypes.GPL`): Platform for annotations
expression_column (:obj:`str`): Column name which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
rename (:obj:`bool`): Rename output column to the
self.name. Defaults to True.
force (:obj:`bool`): If the name of the GPL does not match the platform
name in GSM proceed anyway. Defaults to False.
merge_on_column (:obj:`str`): Column to merge the data
on. Defaults to None.
gsm_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GSM. Defaults to None.
gpl_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GPL. Defaults to None.
Returns:
:obj:`pandas.DataFrame`: Annotated data
"""
if gpl.name != self.metadata['platform_id'][0] and not force:
raise KeyError("Platforms from GSM (%s) and from GPL (%s)" % (
gpl.name, self.metadata['platform_id']) +
" are incompatible. Use force=True to use this GPL.")
if merge_on_column is None and gpl_on is None and gsm_on is None:
raise Exception("You have to provide one of the two: "
"merge_on_column or gpl_on and gsm_on parameters")
if merge_on_column:
logger.info("merge_on_column is not None. Using this option.")
tmp_data = self.table.merge(gpl.table, on=merge_on_column,
how='outer')
tmp_data = tmp_data.groupby(group_by_column).mean()[
[expression_column]]
else:
if gpl_on is None or gsm_on is None:
raise Exception("Please provide both gpl_on and gsm_on or "
"provide merge_on_column only")
tmp_data = self.table.merge(gpl.table, left_on=gsm_on,
right_on=gpl_on, how='outer')
tmp_data = tmp_data.groupby(group_by_column).mean()[
[expression_column]]
if rename:
tmp_data.columns = [self.name]
return tmp_data | python | def annotate_and_average(self, gpl, expression_column, group_by_column,
rename=True, force=False, merge_on_column=None,
gsm_on=None, gpl_on=None):
"""Annotate GSM table with provided GPL.
Args:
gpl (:obj:`GEOTypes.GPL`): Platform for annotations
expression_column (:obj:`str`): Column name which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
rename (:obj:`bool`): Rename output column to the
self.name. Defaults to True.
force (:obj:`bool`): If the name of the GPL does not match the platform
name in GSM proceed anyway. Defaults to False.
merge_on_column (:obj:`str`): Column to merge the data
on. Defaults to None.
gsm_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GSM. Defaults to None.
gpl_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GPL. Defaults to None.
Returns:
:obj:`pandas.DataFrame`: Annotated data
"""
if gpl.name != self.metadata['platform_id'][0] and not force:
raise KeyError("Platforms from GSM (%s) and from GPL (%s)" % (
gpl.name, self.metadata['platform_id']) +
" are incompatible. Use force=True to use this GPL.")
if merge_on_column is None and gpl_on is None and gsm_on is None:
raise Exception("You have to provide one of the two: "
"merge_on_column or gpl_on and gsm_on parameters")
if merge_on_column:
logger.info("merge_on_column is not None. Using this option.")
tmp_data = self.table.merge(gpl.table, on=merge_on_column,
how='outer')
tmp_data = tmp_data.groupby(group_by_column).mean()[
[expression_column]]
else:
if gpl_on is None or gsm_on is None:
raise Exception("Please provide both gpl_on and gsm_on or "
"provide merge_on_column only")
tmp_data = self.table.merge(gpl.table, left_on=gsm_on,
right_on=gpl_on, how='outer')
tmp_data = tmp_data.groupby(group_by_column).mean()[
[expression_column]]
if rename:
tmp_data.columns = [self.name]
return tmp_data | [
"def",
"annotate_and_average",
"(",
"self",
",",
"gpl",
",",
"expression_column",
",",
"group_by_column",
",",
"rename",
"=",
"True",
",",
"force",
"=",
"False",
",",
"merge_on_column",
"=",
"None",
",",
"gsm_on",
"=",
"None",
",",
"gpl_on",
"=",
"None",
"... | Annotate GSM table with provided GPL.
Args:
gpl (:obj:`GEOTypes.GPL`): Platform for annotations
expression_column (:obj:`str`): Column name which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
rename (:obj:`bool`): Rename output column to the
self.name. Defaults to True.
force (:obj:`bool`): If the name of the GPL does not match the platform
name in GSM proceed anyway. Defaults to False.
merge_on_column (:obj:`str`): Column to merge the data
on. Defaults to None.
gsm_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GSM. Defaults to None.
gpl_on (:obj:`str`): In the case columns to merge are different in GSM
and GPL use this column in GPL. Defaults to None.
Returns:
:obj:`pandas.DataFrame`: Annotated data | [
"Annotate",
"GSM",
"table",
"with",
"provided",
"GPL",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L369-L417 | train | 205,223 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSM.download_supplementary_files | def download_supplementary_files(self, directory="./", download_sra=True,
email=None, sra_kwargs=None):
"""Download all supplementary data available for the sample.
Args:
directory (:obj:`str`): Directory to download the data (in this directory
function will create new directory with the files).
Defaults to "./".
download_sra (:obj:`bool`): Indicates whether to download SRA raw
data too. Defaults to True.
email (:obj:`str`): E-mail that will be provided to the Entrez.
It is mandatory if download_sra=True. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
download_SRA method. Defaults to None.
Returns:
:obj:`dict`: A key-value pair of name taken from the metadata and
paths downloaded, in the case of SRA files the key is ``SRA``.
"""
directory_path = os.path.abspath(
os.path.join(directory, "%s_%s_%s" % (
'Supp',
self.get_accession(),
# the directory name cannot contain many of the signs
re.sub(r'[\s\*\?\(\),\.;]', '_', self.metadata['title'][0]))))
utils.mkdir_p(os.path.abspath(directory_path))
downloaded_paths = dict()
if sra_kwargs is None:
sra_kwargs = {}
# Possible erroneous values that could be identified and skipped right
# after
blacklist = ('NONE',)
for metakey, metavalue in iteritems(self.metadata):
if 'supplementary_file' in metakey:
assert len(metavalue) == 1 and metavalue != ''
if metavalue[0] in blacklist:
logger.warn("%s value is blacklisted as '%s' - skipping" %
(metakey, metavalue[0]))
continue
# SRA will be downloaded elsewhere
if 'sra' not in metavalue[0]:
download_path = os.path.abspath(os.path.join(
directory,
os.path.join(directory_path,
metavalue[0].split("/")[-1])))
try:
utils.download_from_url(metavalue[0], download_path)
downloaded_paths[metavalue[0]] = download_path
except Exception as err:
logger.error(
"Cannot download %s supplementary file (%s)" % (
self.get_accession(), err))
if download_sra:
try:
downloaded_files = self.download_SRA(
email,
directory=directory,
**sra_kwargs)
downloaded_paths.update(downloaded_files)
except Exception as err:
logger.error("Cannot download %s SRA file (%s)" % (
self.get_accession(), err))
return downloaded_paths | python | def download_supplementary_files(self, directory="./", download_sra=True,
email=None, sra_kwargs=None):
"""Download all supplementary data available for the sample.
Args:
directory (:obj:`str`): Directory to download the data (in this directory
function will create new directory with the files).
Defaults to "./".
download_sra (:obj:`bool`): Indicates whether to download SRA raw
data too. Defaults to True.
email (:obj:`str`): E-mail that will be provided to the Entrez.
It is mandatory if download_sra=True. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
download_SRA method. Defaults to None.
Returns:
:obj:`dict`: A key-value pair of name taken from the metadata and
paths downloaded, in the case of SRA files the key is ``SRA``.
"""
directory_path = os.path.abspath(
os.path.join(directory, "%s_%s_%s" % (
'Supp',
self.get_accession(),
# the directory name cannot contain many of the signs
re.sub(r'[\s\*\?\(\),\.;]', '_', self.metadata['title'][0]))))
utils.mkdir_p(os.path.abspath(directory_path))
downloaded_paths = dict()
if sra_kwargs is None:
sra_kwargs = {}
# Possible erroneous values that could be identified and skipped right
# after
blacklist = ('NONE',)
for metakey, metavalue in iteritems(self.metadata):
if 'supplementary_file' in metakey:
assert len(metavalue) == 1 and metavalue != ''
if metavalue[0] in blacklist:
logger.warn("%s value is blacklisted as '%s' - skipping" %
(metakey, metavalue[0]))
continue
# SRA will be downloaded elsewhere
if 'sra' not in metavalue[0]:
download_path = os.path.abspath(os.path.join(
directory,
os.path.join(directory_path,
metavalue[0].split("/")[-1])))
try:
utils.download_from_url(metavalue[0], download_path)
downloaded_paths[metavalue[0]] = download_path
except Exception as err:
logger.error(
"Cannot download %s supplementary file (%s)" % (
self.get_accession(), err))
if download_sra:
try:
downloaded_files = self.download_SRA(
email,
directory=directory,
**sra_kwargs)
downloaded_paths.update(downloaded_files)
except Exception as err:
logger.error("Cannot download %s SRA file (%s)" % (
self.get_accession(), err))
return downloaded_paths | [
"def",
"download_supplementary_files",
"(",
"self",
",",
"directory",
"=",
"\"./\"",
",",
"download_sra",
"=",
"True",
",",
"email",
"=",
"None",
",",
"sra_kwargs",
"=",
"None",
")",
":",
"directory_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",... | Download all supplementary data available for the sample.
Args:
directory (:obj:`str`): Directory to download the data (in this directory
function will create new directory with the files).
Defaults to "./".
download_sra (:obj:`bool`): Indicates whether to download SRA raw
data too. Defaults to True.
email (:obj:`str`): E-mail that will be provided to the Entrez.
It is mandatory if download_sra=True. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
download_SRA method. Defaults to None.
Returns:
:obj:`dict`: A key-value pair of name taken from the metadata and
paths downloaded, in the case of SRA files the key is ``SRA``. | [
"Download",
"all",
"supplementary",
"data",
"available",
"for",
"the",
"sample",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L419-L482 | train | 205,224 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSM.download_SRA | def download_SRA(self, email, directory='./', **kwargs):
"""Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA of FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB
"""
downloader = SRADownloader(self, email, directory, **kwargs)
return {"SRA": downloader.download()} | python | def download_SRA(self, email, directory='./', **kwargs):
"""Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA of FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB
"""
downloader = SRADownloader(self, email, directory, **kwargs)
return {"SRA": downloader.download()} | [
"def",
"download_SRA",
"(",
"self",
",",
"email",
",",
"directory",
"=",
"'./'",
",",
"*",
"*",
"kwargs",
")",
":",
"downloader",
"=",
"SRADownloader",
"(",
"self",
",",
"email",
",",
"directory",
",",
"*",
"*",
"kwargs",
")",
"return",
"{",
"\"SRA\"",... | Download RAW data as SRA file.
The files will be downloaded to the sample directory created ad hoc
or the directory specified by the parameter. The sample has to come
from sequencing eg. mRNA-seq, CLIP etc.
An important parameter is a filetype. By default an SRA
is accessed by FTP and such file is downloaded. This does not
require additional libraries. However in order
to produce FASTA of FASTQ files one would need to use SRA-Toolkit.
Thus, it is assumed that this library is already installed or it
will be installed in the near future. One can immediately specify
the download type to fasta or fastq.
To see all possible ``**kwargs`` that could be passed to the function
see the description of :class:`~GEOparse.sra_downloader.SRADownloader`.
Args:
email (:obj:`str`): an email (any) - Required by NCBI for access
directory (:obj:`str`, optional): The directory to which download
the data. Defaults to "./".
**kwargs: Arbitrary keyword arguments, see description
Returns:
:obj:`dict`: A dictionary containing only one key (``SRA``) with
the list of downloaded files.
Raises:
:obj:`TypeError`: Type to download unknown
:obj:`NoSRARelationException`: No SRAToolkit
:obj:`Exception`: Wrong e-mail
:obj:`HTTPError`: Cannot access or connect to DB | [
"Download",
"RAW",
"data",
"as",
"SRA",
"file",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L484-L519 | train | 205,225 |
guma44/GEOparse | GEOparse/GEOTypes.py | GDSSubset._get_object_as_soft | def _get_object_as_soft(self):
"""Get the object as SOFT formatted string."""
soft = ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
return "\n".join(soft) | python | def _get_object_as_soft(self):
"""Get the object as SOFT formatted string."""
soft = ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
return "\n".join(soft) | [
"def",
"_get_object_as_soft",
"(",
"self",
")",
":",
"soft",
"=",
"[",
"\"^%s = %s\"",
"%",
"(",
"self",
".",
"geotype",
",",
"self",
".",
"name",
")",
",",
"self",
".",
"_get_metadata_as_string",
"(",
")",
"]",
"return",
"\"\\n\"",
".",
"join",
"(",
"... | Get the object as SOFT formatted string. | [
"Get",
"the",
"object",
"as",
"SOFT",
"formatted",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L578-L582 | train | 205,226 |
guma44/GEOparse | GEOparse/GEOTypes.py | GDS._get_object_as_soft | def _get_object_as_soft(self):
"""Return object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for subset in self.subsets.values():
soft.append(subset._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_columns_as_string(),
self._get_table_as_string()]
return "\n".join(soft) | python | def _get_object_as_soft(self):
"""Return object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for subset in self.subsets.values():
soft.append(subset._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_columns_as_string(),
self._get_table_as_string()]
return "\n".join(soft) | [
"def",
"_get_object_as_soft",
"(",
"self",
")",
":",
"soft",
"=",
"[",
"]",
"if",
"self",
".",
"database",
"is",
"not",
"None",
":",
"soft",
".",
"append",
"(",
"self",
".",
"database",
".",
"_get_object_as_soft",
"(",
")",
")",
"soft",
"+=",
"[",
"\... | Return object as SOFT formatted string. | [
"Return",
"object",
"as",
"SOFT",
"formatted",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L637-L649 | train | 205,227 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.phenotype_data | def phenotype_data(self):
"""Get the phenotype data for each of the sample."""
if self._phenotype_data is None:
pheno_data = {}
for gsm_name, gsm in iteritems(self.gsms):
tmp = {}
for key, value in iteritems(gsm.metadata):
if len(value) == 0:
tmp[key] = np.nan
elif key.startswith("characteristics_"):
for i, char in enumerate(value):
char = re.split(":\s+", char)
char_type, char_value = [char[0],
": ".join(char[1:])]
tmp[key + "." + str(
i) + "." + char_type] = char_value
else:
tmp[key] = ",".join(value)
pheno_data[gsm_name] = tmp
self._phenotype_data = DataFrame(pheno_data).T
return self._phenotype_data | python | def phenotype_data(self):
"""Get the phenotype data for each of the sample."""
if self._phenotype_data is None:
pheno_data = {}
for gsm_name, gsm in iteritems(self.gsms):
tmp = {}
for key, value in iteritems(gsm.metadata):
if len(value) == 0:
tmp[key] = np.nan
elif key.startswith("characteristics_"):
for i, char in enumerate(value):
char = re.split(":\s+", char)
char_type, char_value = [char[0],
": ".join(char[1:])]
tmp[key + "." + str(
i) + "." + char_type] = char_value
else:
tmp[key] = ",".join(value)
pheno_data[gsm_name] = tmp
self._phenotype_data = DataFrame(pheno_data).T
return self._phenotype_data | [
"def",
"phenotype_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_phenotype_data",
"is",
"None",
":",
"pheno_data",
"=",
"{",
"}",
"for",
"gsm_name",
",",
"gsm",
"in",
"iteritems",
"(",
"self",
".",
"gsms",
")",
":",
"tmp",
"=",
"{",
"}",
"for",
... | Get the phenotype data for each of the sample. | [
"Get",
"the",
"phenotype",
"data",
"for",
"each",
"of",
"the",
"sample",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L696-L716 | train | 205,228 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.merge_and_average | def merge_and_average(self, platform, expression_column, group_by_column,
force=False, merge_on_column=None, gsm_on=None,
gpl_on=None):
"""Merge and average GSE samples.
For given platform prepare the DataFrame with all the samples present in
the GSE annotated with given column from platform and averaged over
the column.
Args:
platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use.
expression_column (:obj:`str`): Column name in which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
force (:obj:`bool`): If the name of the GPL does not match the
platform name in GSM proceed anyway
merge_on_column (:obj:`str`): Column to merge the data on - should
be present in both GSM and GPL
gsm_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GSM
gpl_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GPL
Returns:
:obj:`pandas.DataFrame`: Merged and averaged table of results.
"""
if isinstance(platform, str):
gpl = self.gpls[platform]
elif isinstance(platform, GPL):
gpl = platform
else:
raise ValueError("Platform has to be of type GPL or string with "
"key for platform in GSE")
data = []
for gsm in self.gsms.values():
if gpl.name == gsm.metadata['platform_id'][0]:
data.append(gsm.annotate_and_average(
gpl=gpl,
merge_on_column=merge_on_column,
expression_column=expression_column,
group_by_column=group_by_column,
force=force,
gpl_on=gpl_on,
gsm_on=gsm_on))
if len(data) == 0:
logger.warning("No samples for the platform were found\n")
return None
elif len(data) == 1:
return data[0]
else:
return data[0].join(data[1:]) | python | def merge_and_average(self, platform, expression_column, group_by_column,
force=False, merge_on_column=None, gsm_on=None,
gpl_on=None):
"""Merge and average GSE samples.
For given platform prepare the DataFrame with all the samples present in
the GSE annotated with given column from platform and averaged over
the column.
Args:
platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use.
expression_column (:obj:`str`): Column name in which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
force (:obj:`bool`): If the name of the GPL does not match the
platform name in GSM proceed anyway
merge_on_column (:obj:`str`): Column to merge the data on - should
be present in both GSM and GPL
gsm_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GSM
gpl_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GPL
Returns:
:obj:`pandas.DataFrame`: Merged and averaged table of results.
"""
if isinstance(platform, str):
gpl = self.gpls[platform]
elif isinstance(platform, GPL):
gpl = platform
else:
raise ValueError("Platform has to be of type GPL or string with "
"key for platform in GSE")
data = []
for gsm in self.gsms.values():
if gpl.name == gsm.metadata['platform_id'][0]:
data.append(gsm.annotate_and_average(
gpl=gpl,
merge_on_column=merge_on_column,
expression_column=expression_column,
group_by_column=group_by_column,
force=force,
gpl_on=gpl_on,
gsm_on=gsm_on))
if len(data) == 0:
logger.warning("No samples for the platform were found\n")
return None
elif len(data) == 1:
return data[0]
else:
return data[0].join(data[1:]) | [
"def",
"merge_and_average",
"(",
"self",
",",
"platform",
",",
"expression_column",
",",
"group_by_column",
",",
"force",
"=",
"False",
",",
"merge_on_column",
"=",
"None",
",",
"gsm_on",
"=",
"None",
",",
"gpl_on",
"=",
"None",
")",
":",
"if",
"isinstance",... | Merge and average GSE samples.
For given platform prepare the DataFrame with all the samples present in
the GSE annotated with given column from platform and averaged over
the column.
Args:
platform (:obj:`str` or :obj:`GEOparse.GPL`): GPL platform to use.
expression_column (:obj:`str`): Column name in which "expressions"
are represented
group_by_column (:obj:`str`): The data will be grouped and averaged
over this column and only this column will be kept
force (:obj:`bool`): If the name of the GPL does not match the
platform name in GSM proceed anyway
merge_on_column (:obj:`str`): Column to merge the data on - should
be present in both GSM and GPL
gsm_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GSM
gpl_on (:obj:`str`): In the case columns to merge are different in
GSM and GPL use this column in GPL
Returns:
:obj:`pandas.DataFrame`: Merged and averaged table of results. | [
"Merge",
"and",
"average",
"GSE",
"samples",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L718-L771 | train | 205,229 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.pivot_samples | def pivot_samples(self, values, index="ID_REF"):
"""Pivot samples by specified column.
Construct a table in which columns (names) are the samples, index
is a specified column eg. ID_REF and values in the columns are of one
specified type.
Args:
values (:obj:`str`): Column name present in all GSMs.
index (:obj:`str`, optional): Column name that will become an index in
pivoted table. Defaults to "ID_REF".
Returns:
:obj:`pandas.DataFrame`: Pivoted data
"""
data = []
for gsm in self.gsms.values():
tmp_data = gsm.table.copy()
tmp_data["name"] = gsm.name
data.append(tmp_data)
ndf = concat(data).pivot(index=index, values=values, columns="name")
return ndf | python | def pivot_samples(self, values, index="ID_REF"):
"""Pivot samples by specified column.
Construct a table in which columns (names) are the samples, index
is a specified column eg. ID_REF and values in the columns are of one
specified type.
Args:
values (:obj:`str`): Column name present in all GSMs.
index (:obj:`str`, optional): Column name that will become an index in
pivoted table. Defaults to "ID_REF".
Returns:
:obj:`pandas.DataFrame`: Pivoted data
"""
data = []
for gsm in self.gsms.values():
tmp_data = gsm.table.copy()
tmp_data["name"] = gsm.name
data.append(tmp_data)
ndf = concat(data).pivot(index=index, values=values, columns="name")
return ndf | [
"def",
"pivot_samples",
"(",
"self",
",",
"values",
",",
"index",
"=",
"\"ID_REF\"",
")",
":",
"data",
"=",
"[",
"]",
"for",
"gsm",
"in",
"self",
".",
"gsms",
".",
"values",
"(",
")",
":",
"tmp_data",
"=",
"gsm",
".",
"table",
".",
"copy",
"(",
"... | Pivot samples by specified column.
Construct a table in which columns (names) are the samples, index
is a specified column eg. ID_REF and values in the columns are of one
specified type.
Args:
values (:obj:`str`): Column name present in all GSMs.
index (:obj:`str`, optional): Column name that will become an index in
pivoted table. Defaults to "ID_REF".
Returns:
:obj:`pandas.DataFrame`: Pivoted data | [
"Pivot",
"samples",
"by",
"specified",
"column",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L773-L795 | train | 205,230 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.pivot_and_annotate | def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on="ID",
gsm_on="ID_REF"):
"""Annotate GSM with provided GPL.
Args:
values (:obj:`str`): Column to use as values eg. "VALUES"
gpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or
DataFrame to annotate with.
annotation_column (:obj:`str`): Column in table for annotation.
gpl_on (:obj:`str`, optional): Use this column in GPL to merge.
Defaults to "ID".
gsm_on (:obj:`str`, optional): Use this column in GSM to merge.
Defaults to "ID_REF".
Returns:
pandas.DataFrame: Pivoted and annotated table of results
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
pivoted_samples = self.pivot_samples(values=values, index=gsm_on)
ndf = pivoted_samples.reset_index().merge(
annotation_table[[gpl_on, annotation_column]],
left_on=gsm_on,
right_on=gpl_on).set_index(gsm_on)
del ndf[gpl_on]
ndf.columns.name = 'name'
return ndf | python | def pivot_and_annotate(self, values, gpl, annotation_column, gpl_on="ID",
gsm_on="ID_REF"):
"""Annotate GSM with provided GPL.
Args:
values (:obj:`str`): Column to use as values eg. "VALUES"
gpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or
DataFrame to annotate with.
annotation_column (:obj:`str`): Column in table for annotation.
gpl_on (:obj:`str`, optional): Use this column in GPL to merge.
Defaults to "ID".
gsm_on (:obj:`str`, optional): Use this column in GSM to merge.
Defaults to "ID_REF".
Returns:
pandas.DataFrame: Pivoted and annotated table of results
"""
if isinstance(gpl, GPL):
annotation_table = gpl.table
elif isinstance(gpl, DataFrame):
annotation_table = gpl
else:
raise TypeError("gpl should be a GPL object or a pandas.DataFrame")
pivoted_samples = self.pivot_samples(values=values, index=gsm_on)
ndf = pivoted_samples.reset_index().merge(
annotation_table[[gpl_on, annotation_column]],
left_on=gsm_on,
right_on=gpl_on).set_index(gsm_on)
del ndf[gpl_on]
ndf.columns.name = 'name'
return ndf | [
"def",
"pivot_and_annotate",
"(",
"self",
",",
"values",
",",
"gpl",
",",
"annotation_column",
",",
"gpl_on",
"=",
"\"ID\"",
",",
"gsm_on",
"=",
"\"ID_REF\"",
")",
":",
"if",
"isinstance",
"(",
"gpl",
",",
"GPL",
")",
":",
"annotation_table",
"=",
"gpl",
... | Annotate GSM with provided GPL.
Args:
values (:obj:`str`): Column to use as values eg. "VALUES"
gpl (:obj:`pandas.DataFrame` or :obj:`GEOparse.GPL`): A Platform or
DataFrame to annotate with.
annotation_column (:obj:`str`): Column in table for annotation.
gpl_on (:obj:`str`, optional): Use this column in GPL to merge.
Defaults to "ID".
gsm_on (:obj:`str`, optional): Use this column in GSM to merge.
Defaults to "ID_REF".
Returns:
pandas.DataFrame: Pivoted and annotated table of results | [
"Annotate",
"GSM",
"with",
"provided",
"GPL",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L797-L828 | train | 205,231 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.download_supplementary_files | def download_supplementary_files(self, directory='series',
download_sra=True, email=None,
sra_kwargs=None, nproc=1):
"""Download supplementary data.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
directory (:obj:`str`, optional): Directory to download the data
(in this directory function will create new directory with the
files), by default this will be named with the series
name + _Supp.
download_sra (:obj:`bool`, optional): Indicates whether to download
SRA raw data too. Defaults to True.
email (:obj:`str`, optional): E-mail that will be provided to the
Entrez. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
GSM.download_SRA method. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
Returns:
:obj:`dict`: Downloaded data for each of the GSM
"""
if sra_kwargs is None:
sra_kwargs = dict()
if directory == 'series':
dirpath = os.path.abspath(self.get_accession() + "_Supp")
utils.mkdir_p(dirpath)
else:
dirpath = os.path.abspath(directory)
utils.mkdir_p(dirpath)
downloaded_paths = dict()
if nproc == 1:
# No need to parallelize, running ordinary download in loop
downloaded_paths = dict()
for gsm in itervalues(self.gsms):
logger.info(
"Downloading SRA files for %s series\n" % gsm.name)
paths = gsm.download_supplementary_files(email=email,
download_sra=download_sra,
directory=dirpath,
sra_kwargs=sra_kwargs)
downloaded_paths[gsm.name] = paths
elif nproc > 1:
# Parallelization enabled
downloaders = list()
# Collecting params for Pool.map in a loop
for gsm in itervalues(self.gsms):
downloaders.append([
gsm,
download_sra,
email,
dirpath,
sra_kwargs])
p = Pool(nproc)
results = p.map(_supplementary_files_download_worker, downloaders)
downloaded_paths = dict(results)
else:
raise ValueError("Nproc should be non-negative: %s" % str(nproc))
return downloaded_paths | python | def download_supplementary_files(self, directory='series',
download_sra=True, email=None,
sra_kwargs=None, nproc=1):
"""Download supplementary data.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
directory (:obj:`str`, optional): Directory to download the data
(in this directory function will create new directory with the
files), by default this will be named with the series
name + _Supp.
download_sra (:obj:`bool`, optional): Indicates whether to download
SRA raw data too. Defaults to True.
email (:obj:`str`, optional): E-mail that will be provided to the
Entrez. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
GSM.download_SRA method. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
Returns:
:obj:`dict`: Downloaded data for each of the GSM
"""
if sra_kwargs is None:
sra_kwargs = dict()
if directory == 'series':
dirpath = os.path.abspath(self.get_accession() + "_Supp")
utils.mkdir_p(dirpath)
else:
dirpath = os.path.abspath(directory)
utils.mkdir_p(dirpath)
downloaded_paths = dict()
if nproc == 1:
# No need to parallelize, running ordinary download in loop
downloaded_paths = dict()
for gsm in itervalues(self.gsms):
logger.info(
"Downloading SRA files for %s series\n" % gsm.name)
paths = gsm.download_supplementary_files(email=email,
download_sra=download_sra,
directory=dirpath,
sra_kwargs=sra_kwargs)
downloaded_paths[gsm.name] = paths
elif nproc > 1:
# Parallelization enabled
downloaders = list()
# Collecting params for Pool.map in a loop
for gsm in itervalues(self.gsms):
downloaders.append([
gsm,
download_sra,
email,
dirpath,
sra_kwargs])
p = Pool(nproc)
results = p.map(_supplementary_files_download_worker, downloaders)
downloaded_paths = dict(results)
else:
raise ValueError("Nproc should be non-negative: %s" % str(nproc))
return downloaded_paths | [
"def",
"download_supplementary_files",
"(",
"self",
",",
"directory",
"=",
"'series'",
",",
"download_sra",
"=",
"True",
",",
"email",
"=",
"None",
",",
"sra_kwargs",
"=",
"None",
",",
"nproc",
"=",
"1",
")",
":",
"if",
"sra_kwargs",
"is",
"None",
":",
"... | Download supplementary data.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
directory (:obj:`str`, optional): Directory to download the data
(in this directory function will create new directory with the
files), by default this will be named with the series
name + _Supp.
download_sra (:obj:`bool`, optional): Indicates whether to download
SRA raw data too. Defaults to True.
email (:obj:`str`, optional): E-mail that will be provided to the
Entrez. Defaults to None.
sra_kwargs (:obj:`dict`, optional): Kwargs passed to the
GSM.download_SRA method. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
Returns:
:obj:`dict`: Downloaded data for each of the GSM | [
"Download",
"supplementary",
"data",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L830-L895 | train | 205,232 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE.download_SRA | def download_SRA(self, email, directory='series', filterby=None, nproc=1,
**kwargs):
"""Download SRA files for each GSM in series.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
email (:obj:`str`): E-mail that will be provided to the Entrez.
directory (:obj:`str`, optional): Directory to save the data
(defaults to the 'series' which saves the data to the directory
with the name of the series + '_SRA' ending).
Defaults to "series".
filterby (:obj:`str`, optional): Filter GSM objects, argument is a
function that operates on GSM object and return bool
eg. lambda x: "brain" not in x.name. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
**kwargs: Any arbitrary argument passed to GSM.download_SRA
method. See the documentation for more details.
Returns:
:obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
method where each GSM accession ID is the key for the
output.
"""
if directory == 'series':
dirpath = os.path.abspath(self.get_accession() + "_SRA")
utils.mkdir_p(dirpath)
else:
dirpath = os.path.abspath(directory)
utils.mkdir_p(dirpath)
if filterby is not None:
gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)]
else:
gsms_to_use = self.gsms.values()
if nproc == 1:
# No need to parallelize, running ordinary download in loop
downloaded_paths = dict()
for gsm in gsms_to_use:
logger.info(
"Downloading SRA files for %s series\n" % gsm.name)
downloaded_paths[gsm.name] = gsm.download_SRA(
email=email,
directory=dirpath,
**kwargs)
elif nproc > 1:
# Parallelization enabled
downloaders = list()
# Collecting params for Pool.map in a loop
for gsm in gsms_to_use:
downloaders.append([
gsm,
email,
dirpath,
kwargs])
p = Pool(nproc)
results = p.map(_sra_download_worker, downloaders)
downloaded_paths = dict(results)
else:
raise ValueError("Nproc should be non-negative: %s" % str(nproc))
return downloaded_paths | python | def download_SRA(self, email, directory='series', filterby=None, nproc=1,
**kwargs):
"""Download SRA files for each GSM in series.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
email (:obj:`str`): E-mail that will be provided to the Entrez.
directory (:obj:`str`, optional): Directory to save the data
(defaults to the 'series' which saves the data to the directory
with the name of the series + '_SRA' ending).
Defaults to "series".
filterby (:obj:`str`, optional): Filter GSM objects, argument is a
function that operates on GSM object and return bool
eg. lambda x: "brain" not in x.name. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
**kwargs: Any arbitrary argument passed to GSM.download_SRA
method. See the documentation for more details.
Returns:
:obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
method where each GSM accession ID is the key for the
output.
"""
if directory == 'series':
dirpath = os.path.abspath(self.get_accession() + "_SRA")
utils.mkdir_p(dirpath)
else:
dirpath = os.path.abspath(directory)
utils.mkdir_p(dirpath)
if filterby is not None:
gsms_to_use = [gsm for gsm in self.gsms.values() if filterby(gsm)]
else:
gsms_to_use = self.gsms.values()
if nproc == 1:
# No need to parallelize, running ordinary download in loop
downloaded_paths = dict()
for gsm in gsms_to_use:
logger.info(
"Downloading SRA files for %s series\n" % gsm.name)
downloaded_paths[gsm.name] = gsm.download_SRA(
email=email,
directory=dirpath,
**kwargs)
elif nproc > 1:
# Parallelization enabled
downloaders = list()
# Collecting params for Pool.map in a loop
for gsm in gsms_to_use:
downloaders.append([
gsm,
email,
dirpath,
kwargs])
p = Pool(nproc)
results = p.map(_sra_download_worker, downloaders)
downloaded_paths = dict(results)
else:
raise ValueError("Nproc should be non-negative: %s" % str(nproc))
return downloaded_paths | [
"def",
"download_SRA",
"(",
"self",
",",
"email",
",",
"directory",
"=",
"'series'",
",",
"filterby",
"=",
"None",
",",
"nproc",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"directory",
"==",
"'series'",
":",
"dirpath",
"=",
"os",
".",
"path",... | Download SRA files for each GSM in series.
.. warning::
Do not use parallel option (nproc > 1) in the interactive shell.
For more details see `this issue <https://stackoverflow.com/questions/23641475/multiprocessing-working-in-python-but-not-in-ipython/23641560#23641560>`_
on SO.
Args:
email (:obj:`str`): E-mail that will be provided to the Entrez.
directory (:obj:`str`, optional): Directory to save the data
(defaults to the 'series' which saves the data to the directory
with the name of the series + '_SRA' ending).
Defaults to "series".
filterby (:obj:`str`, optional): Filter GSM objects, argument is a
function that operates on GSM object and return bool
eg. lambda x: "brain" not in x.name. Defaults to None.
nproc (:obj:`int`, optional): Number of processes for SRA download
(default is 1, no parallelization).
**kwargs: Any arbitrary argument passed to GSM.download_SRA
method. See the documentation for more details.
Returns:
:obj:`dict`: A dictionary containing output of ``GSM.download_SRA``
method where each GSM accession ID is the key for the
output. | [
"Download",
"SRA",
"files",
"for",
"each",
"GSM",
"in",
"series",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L897-L964 | train | 205,233 |
guma44/GEOparse | GEOparse/GEOTypes.py | GSE._get_object_as_soft | def _get_object_as_soft(self):
"""Get object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for gsm in itervalues(self.gsms):
soft.append(gsm._get_object_as_soft())
for gpl in itervalues(self.gpls):
soft.append(gpl._get_object_as_soft())
return "\n".join(soft) | python | def _get_object_as_soft(self):
"""Get object as SOFT formatted string."""
soft = []
if self.database is not None:
soft.append(self.database._get_object_as_soft())
soft += ["^%s = %s" % (self.geotype, self.name),
self._get_metadata_as_string()]
for gsm in itervalues(self.gsms):
soft.append(gsm._get_object_as_soft())
for gpl in itervalues(self.gpls):
soft.append(gpl._get_object_as_soft())
return "\n".join(soft) | [
"def",
"_get_object_as_soft",
"(",
"self",
")",
":",
"soft",
"=",
"[",
"]",
"if",
"self",
".",
"database",
"is",
"not",
"None",
":",
"soft",
".",
"append",
"(",
"self",
".",
"database",
".",
"_get_object_as_soft",
"(",
")",
")",
"soft",
"+=",
"[",
"\... | Get object as SOFT formatted string. | [
"Get",
"object",
"as",
"SOFT",
"formatted",
"string",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOTypes.py#L966-L978 | train | 205,234 |
guma44/GEOparse | GEOparse/downloader.py | Downloader.destination | def destination(self):
"""Get the destination path.
This is the property should be calculated every time it is used because
a user could change the outdir and filename dynamically.
"""
return os.path.join(os.path.abspath(self.outdir), self.filename) | python | def destination(self):
"""Get the destination path.
This is the property should be calculated every time it is used because
a user could change the outdir and filename dynamically.
"""
return os.path.join(os.path.abspath(self.outdir), self.filename) | [
"def",
"destination",
"(",
"self",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"self",
".",
"outdir",
")",
",",
"self",
".",
"filename",
")"
] | Get the destination path.
This is the property should be calculated every time it is used because
a user could change the outdir and filename dynamically. | [
"Get",
"the",
"destination",
"path",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/downloader.py#L37-L43 | train | 205,235 |
def download(self, force=False, silent=False):
    """Download from URL."""
    def _fetch():
        # Dispatch on the URL scheme, fetch into a temp file, then
        # promote the temp file to its final destination.
        if self.url.startswith("http"):
            self._download_http(silent=silent)
        elif self.url.startswith("ftp"):
            self._download_ftp(silent=silent)
        else:
            raise ValueError("Invalid URL %s" % self.url)
        logger.debug("Moving %s to %s" % (
            self._temp_file_name,
            self.destination))
        shutil.move(self._temp_file_name, self.destination)
        logger.debug("Successfully downloaded %s" % self.url)

    try:
        if not os.path.isfile(self.destination):
            _fetch()
        elif force:
            # Overwrite requested: drop the existing file first.
            try:
                os.remove(self.destination)
            except Exception:
                logger.error("Cannot delete %s" % self.destination)
            logger.info(
                "Downloading %s to %s" % (self.url, self.destination))
            logger.debug(
                "Downloading %s to %s" % (self.url,
                                          self._temp_file_name))
            _fetch()
        else:
            logger.info(("File %s already exist. Use force=True if you"
                         " would like to overwrite it.") %
                        self.destination)
    finally:
        # Best-effort cleanup; on success the temp file was already moved.
        try:
            os.remove(self._temp_file_name)
        except OSError:
            pass
def download_aspera(self, user, host, silent=False):
    """Download file with Aspera Connect.

    For details see the documentation of Aspera Connect.

    Args:
        user (:obj:`str`): FTP user.
        host (:obj:`str`): FTP host. Defaults to "ftp-trace.ncbi.nlm.nih.gov".
        silent (:obj:`bool`, optional): Do not log the ascp subprocess
            output. Defaults to False.
    """
    # Aspera Connect must be installed locally and located via $ASPERA_HOME.
    aspera_home = os.environ.get("ASPERA_HOME", None)
    if not aspera_home:
        raise ValueError("environment variable $ASPERA_HOME not set")
    if not os.path.exists(aspera_home):
        raise ValueError(
            "$ASPERA_HOME directory {} does not exist".format(aspera_home))
    ascp = os.path.join(aspera_home, "connect/bin/ascp")
    key = os.path.join(aspera_home, "connect/etc/asperaweb_id_dsa.openssh")
    if not os.path.exists(ascp):
        raise ValueError("could not find ascp binary")
    if not os.path.exists(key):
        raise ValueError("could not find openssh key")

    # Only the path component of the URL is handed to ascp; user/host are
    # supplied separately on the command line.
    parsed_url = urlparse(self.url)

    cmd = "{} -i {} -k1 -T -l400m {}@{}:{} {}".format(
        ascp, key, user, host, parsed_url.path, self._temp_file_name)
    logger.debug(cmd)
    try:
        pr = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, stderr = pr.communicate()
        if not silent:
            logger.debug("Aspera stdout: " + str(stdout))
            logger.debug("Aspera stderr: " + str(stderr))
        if pr.returncode == 0:
            # Download succeeded: promote the temp file to the destination.
            logger.debug("Moving %s to %s" % (
                self._temp_file_name,
                self.destination))
            shutil.move(self._temp_file_name, self.destination)
            logger.debug("Successfully downloaded %s" % self.url)
        else:
            logger.error(
                "Failed to download %s using Aspera Connect" % self.url)
    finally:
        # Best-effort cleanup; on success the temp file was already moved.
        try:
            os.remove(self._temp_file_name)
        except OSError:
            pass
def md5sum(filename, blocksize=8192):
    """Get the MD5 checksum of a file.

    Reads the file in ``blocksize`` chunks so arbitrarily large files
    can be hashed without loading them into memory.
    """
    digest = hashlib.md5()
    with open(filename, 'rb') as handle:
        # iter() with a b'' sentinel stops exactly when read() returns
        # an empty chunk, i.e. at end of file.
        for chunk in iter(lambda: handle.read(blocksize), b''):
            digest.update(chunk)
    return digest.hexdigest()
def get_GEO(geo=None, filepath=None, destdir="./", how='full',
            annotate_gpl=False, geotype=None, include_data=False, silent=False,
            aspera=False, partial=None):
    """Get the GEO entry.

    The GEO entry is taken directly from the GEO database or read from a
    local SOFT file.

    Args:
        geo (:obj:`str`): GEO database identifier.
        filepath (:obj:`str`): Path to local SOFT file. Defaults to None.
        destdir (:obj:`str`, optional): Directory to download data. Defaults
            to None.
        how (:obj:`str`, optional): GSM download mode. Defaults to "full".
        annotate_gpl (:obj:`bool`, optional): Download the GPL annotation
            instead of regular GPL. If not available, fall back to regular
            GPL file. Defaults to False.
        geotype (:obj:`str`, optional): Type of GEO entry. By default it is
            inferred from the ID or the file name.
        include_data (:obj:`bool`, optional): Full download of GPLs including
            series and samples. Defaults to False.
        silent (:obj:`bool`, optional): Do not print anything. Defaults to
            False.
        aspera (:obj:`bool`, optional): EXPERIMENTAL Download using Aspera
            Connect. Follow Aspera instructions for further details. Defaults
            to False.
        partial (:obj:'iterable', optional): A list of accession IDs of GSMs
            to be partially extracted from GPL; works only if a
            file/accession is a GPL.

    Returns:
        :obj:`GEOparse.BaseGEO`: A GEO object of given type.
    """
    if geo is None and filepath is None:
        raise Exception("You have to specify filename or GEO accession!")
    if geo is not None and filepath is not None:
        raise Exception("You can specify filename or GEO accession - not both!")

    if silent:
        logger.setLevel(100)  # More than critical

    if filepath is None:
        # No local file given: fetch it from GEO first.
        filepath, geotype = get_GEO_file(geo, destdir=destdir, how=how,
                                         annotate_gpl=annotate_gpl,
                                         include_data=include_data,
                                         silent=silent,
                                         aspera=aspera)
    elif geotype is None:
        # Infer entry type from the file name prefix (GSM/GSE/GPL/GDS).
        geotype = path.basename(filepath)[:3]

    logger.info("Parsing %s: " % filepath)
    kind = geotype.upper()
    if kind == "GSM":
        return parse_GSM(filepath)
    if kind == "GSE":
        return parse_GSE(filepath)
    if kind == 'GPL':
        return parse_GPL(filepath, partial=partial)
    if kind == 'GDS':
        return parse_GDS(filepath)
    raise ValueError(("Unknown GEO type: %s. Available types: GSM, GSE, "
                      "GPL and GDS.") % kind)
def parse_metadata(lines):
    """Parse list of lines with metadata information from SOFT file.

    Args:
        lines (:obj:`Iterable`): Iterator over the lines.

    Returns:
        :obj:`dict`: Metadata from SOFT file.
    """
    meta = defaultdict(list)
    for raw in lines:
        stripped = raw.rstrip()
        # Metadata lines start with "!"; table delimiters are ignored.
        if not stripped.startswith("!"):
            continue
        if "_table_begin" in stripped or "_table_end" in stripped:
            continue
        key, value = __parse_entry(stripped)
        meta[key].append(value)
    return dict(meta)
def parse_columns(lines):
    """Parse list of lines with columns description from SOFT file.

    Args:
        lines (:obj:`Iterable`): Iterator over the lines.

    Returns:
        :obj:`pandas.DataFrame`: Columns description.
    """
    descriptions = []
    names = []
    # Column-description lines start with "#": "#NAME = description".
    for raw in lines:
        stripped = raw.rstrip()
        if stripped.startswith("#"):
            name, description = __parse_entry(stripped)
            names.append(name)
            descriptions.append(description)
    return DataFrame(descriptions, index=names, columns=['description'])
def parse_GDS_columns(lines, subsets):
    """Parse list of line with columns description from SOFT file of GDS.

    Args:
        lines (:obj:`Iterable`): Iterator over the lines.
        subsets (:obj:`dict` of :obj:`GEOparse.GDSSubset`): Subsets to use.

    Returns:
        :obj:`pandas.DataFrame`: Columns description joined with the subset
        annotations (one extra column per subset type).
    """
    data = []
    index = []
    # Column-description lines start with "#": "#NAME = description".
    for line in lines:
        line = line.rstrip()
        if line.startswith("#"):
            tmp = __parse_entry(line)
            data.append(tmp[1])
            index.append(tmp[0])

    df = DataFrame(data, index=index, columns=['description'])
    subset_ids = defaultdict(dict)
    for subsetname, subset in iteritems(subsets):
        for expid in subset.metadata["sample_id"][0].split(","):
            try:
                subset_type = subset.get_type()
                subset_ids[subset_type][expid] = \
                    subset.metadata['description'][0]
            except Exception as err:
                # Log the caught error itself; do NOT re-call
                # subset.get_type() here -- it may be the very call that
                # raised and would throw again out of the handler.
                logger.error("Error processing subset %s: %s" % (
                    subsetname, err))

    return df.join(DataFrame(subset_ids))
def parse_table_data(lines):
    """Parse list of lines from SOFT file into DataFrame.

    Args:
        lines (:obj:`Iterable`): Iterator over the lines.

    Returns:
        :obj:`pandas.DataFrame`: Table data.
    """
    # Keep only non-empty lines that are not entry/metadata/column
    # headers ("^", "!", "#"); those form the tab-separated table body.
    content = "\n".join(
        line.rstrip() for line in lines
        if not line.startswith(("^", "!", "#")) and line.rstrip())
    if not content:
        return DataFrame()
    return read_csv(StringIO(content), index_col=None, sep="\t")
def parse_GSM(filepath, entry_name=None):
    """Parse GSM entry from SOFT file.

    Args:
        filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM
            entry or list of lines representing GSM from GSE file.
        entry_name (:obj:`str`, optional): Name of the entry. By default it
            is inferred from the data.

    Returns:
        :obj:`GEOparse.GSM`: A GSM object.

    Raises:
        Exception: If more than one entry header is present.
        NoEntriesException: If no entry header is found.
    """
    def _collect(lines):
        # Keep every line (rstripped); a table is present as soon as any
        # line is not an entry/metadata/column header ("^", "!", "#").
        collected = []
        table_found = False
        for line in lines:
            if "_table_begin" in line or (
                    not line.startswith(("^", "!", "#"))):
                table_found = True
            collected.append(line.rstrip())
        return collected, table_found

    # The original duplicated this loop for the file-path and the
    # iterable-of-lines cases; both now share _collect.
    if isinstance(filepath, str):
        with utils.smart_open(filepath) as f:
            soft, has_table = _collect(f)
    else:
        soft, has_table = _collect(filepath)

    if entry_name is None:
        sets = [i for i in soft if i.startswith("^")]
        if len(sets) > 1:
            raise Exception("More than one entry in GPL")
        if len(sets) == 0:
            raise NoEntriesException(
                "No entries found. Check the if accession is correct!")
        entry_name = parse_entry_name(sets[0])

    columns = parse_columns(soft)
    metadata = parse_metadata(soft)
    table_data = parse_table_data(soft) if has_table else DataFrame()

    return GSM(name=entry_name,
               table=table_data,
               metadata=metadata,
               columns=columns)
def parse_GPL(filepath, entry_name=None, partial=None):
    """Parse GPL entry from SOFT file.

    Args:
        filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL
            entry or list of lines representing GPL from GSE file.
        entry_name (:obj:`str`, optional): Name of the entry. By default it
            is inferred from the data.
        partial (:obj:'iterable', optional): A list of accession IDs of GSMs
            to be partially extracted from GPL; works only if a
            file/accession is a GPL.

    Returns:
        :obj:`GEOparse.GPL`: A GPL object.

    Raises:
        RuntimeError: If an unknown entry type is encountered.
    """
    gsms = {}
    gses = {}
    gpl_soft = []
    has_table = False
    gpl_name = entry_name
    database = None
    if isinstance(filepath, str):
        with utils.smart_open(filepath) as soft:
            # groupby alternates between "^"-header runs and the data
            # lines that follow each header.
            groupper = groupby(soft, lambda x: x.startswith("^"))
            for is_new_entry, group in groupper:
                if is_new_entry:
                    entry_type, entry_name = __parse_entry(next(group))
                    logger.debug("%s: %s" % (entry_type.upper(), entry_name))
                    if entry_type == "SERIES":
                        is_data, data_group = next(groupper)
                        gse_metadata = parse_metadata(data_group)
                        gses[entry_name] = GSE(name=entry_name,
                                               metadata=gse_metadata)
                    elif entry_type == "SAMPLE":
                        # Honor the partial filter: skip GSMs not requested.
                        if partial and entry_name not in partial:
                            continue
                        is_data, data_group = next(groupper)
                        gsms[entry_name] = parse_GSM(data_group, entry_name)
                    elif entry_type == "DATABASE":
                        is_data, data_group = next(groupper)
                        database_metadata = parse_metadata(data_group)
                        database = GEODatabase(name=entry_name,
                                               metadata=database_metadata)
                    elif entry_type == "PLATFORM" or entry_type == "Annotation":
                        gpl_name = entry_name
                        is_data, data_group = next(groupper)
                        has_gpl_name = gpl_name or gpl_name is None
                        for line in data_group:
                            if ("_table_begin" in line or
                                    not line.startswith(("^", "!", "#"))):
                                has_table = True
                            if not has_gpl_name:
                                # BUGFIX: regex patterns must be raw strings;
                                # "\s" in a plain string is an invalid escape
                                # (DeprecationWarning on modern CPython).
                                if match(r"!Annotation_platform\s*=\s*", line):
                                    gpl_name = split(
                                        r"\s*=\s*", line)[-1].strip()
                                    has_gpl_name = True
                            gpl_soft.append(line)
                    else:
                        raise RuntimeError(
                            "Cannot parse {etype}. Unknown for GPL.".format(
                                etype=entry_type
                            ))
    else:
        for line in filepath:
            if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                has_table = True
            gpl_soft.append(line.rstrip())

    columns = None
    try:
        columns = parse_columns(gpl_soft)
    except Exception:
        pass
    metadata = parse_metadata(gpl_soft)
    if has_table:
        table_data = parse_table_data(gpl_soft)
    else:
        table_data = DataFrame()

    gpl = GPL(name=gpl_name,
              gses=gses,
              gsms=gsms,
              table=table_data,
              metadata=metadata,
              columns=columns,
              database=database
              )

    # Link samples to series, if these were present in the GPL soft file.
    for gse_id, gse in gpl.gses.items():
        for gsm_id in gse.metadata.get("sample_id", []):
            if gsm_id in gpl.gsms:
                gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id]

    return gpl
def parse_GSE(filepath):
    """Parse GSE SOFT file.

    Args:
        filepath (:obj:`str`): Path to GSE SOFT file.

    Returns:
        :obj:`GEOparse.GSE`: A GSE object.

    Raises:
        Exception: If more than one SERIES entry is found in the file.
    """
    gpls = {}
    gsms = {}
    series_counter = 0
    database = None
    metadata = {}
    gse_name = None
    with utils.smart_open(filepath) as soft:
        # groupby alternates between runs of "^"-header lines and the data
        # lines that follow them; next(groupper) inside a header branch
        # therefore yields that entry's data block.
        groupper = groupby(soft, lambda x: x.startswith("^"))
        for is_new_entry, group in groupper:
            if is_new_entry:
                entry_type, entry_name = __parse_entry(next(group))
                logger.debug("%s: %s" % (entry_type.upper(), entry_name))
                if entry_type == "SERIES":
                    gse_name = entry_name
                    series_counter += 1
                    if series_counter > 1:
                        raise Exception(
                            "GSE file should contain only one series entry!")
                    is_data, data_group = next(groupper)
                    message = ("The key is not False, probably there is an "
                               "error in the SOFT file")
                    assert not is_data, message
                    metadata = parse_metadata(data_group)
                elif entry_type == "SAMPLE":
                    is_data, data_group = next(groupper)
                    gsms[entry_name] = parse_GSM(data_group, entry_name)
                elif entry_type == "PLATFORM":
                    is_data, data_group = next(groupper)
                    gpls[entry_name] = parse_GPL(data_group, entry_name)
                elif entry_type == "DATABASE":
                    is_data, data_group = next(groupper)
                    database_metadata = parse_metadata(data_group)
                    database = GEODatabase(name=entry_name,
                                           metadata=database_metadata)
                else:
                    # Unknown entry types are logged and skipped, not fatal.
                    logger.error("Cannot recognize type %s" % entry_type)
    gse = GSE(name=gse_name,
              metadata=metadata,
              gpls=gpls,
              gsms=gsms,
              database=database)
    return gse
guma44/GEOparse | GEOparse/GEOparse.py | parse_GDS | def parse_GDS(filepath):
"""Parse GDS SOFT file.
Args:
filepath (:obj:`str`): Path to GDS SOFT file.
Returns:
:obj:`GEOparse.GDS`: A GDS object.
"""
dataset_lines = []
subsets = {}
database = None
dataset_name = None
with utils.smart_open(filepath) as soft:
groupper = groupby(soft, lambda x: x.startswith("^"))
for is_new_entry, group in groupper:
if is_new_entry:
entry_type, entry_name = __parse_entry(next(group))
logger.debug("%s: %s" % (entry_type.upper(), entry_name))
if entry_type == "SUBSET":
is_data, data_group = next(groupper)
message = ("The key is not False, probably there is an "
"error in the SOFT file")
assert not is_data, message
subset_metadata = parse_metadata(data_group)
subsets[entry_name] = GDSSubset(name=entry_name,
metadata=subset_metadata)
elif entry_type == "DATABASE":
is_data, data_group = next(groupper)
message = ("The key is not False, probably there is an "
"error in the SOFT file")
assert not is_data, message
database_metadata = parse_metadata(data_group)
database = GEODatabase(name=entry_name,
metadata=database_metadata)
elif entry_type == "DATASET":
is_data, data_group = next(groupper)
dataset_name = entry_name
for line in data_group:
dataset_lines.append(line.rstrip())
else:
logger.error("Cannot recognize type %s" % entry_type)
metadata = parse_metadata(dataset_lines)
columns = parse_GDS_columns(dataset_lines, subsets)
table = parse_table_data(dataset_lines)
return GDS(name=dataset_name, metadata=metadata, columns=columns,
table=table, subsets=subsets, database=database) | python | def parse_GDS(filepath):
"""Parse GDS SOFT file.
Args:
filepath (:obj:`str`): Path to GDS SOFT file.
Returns:
:obj:`GEOparse.GDS`: A GDS object.
"""
dataset_lines = []
subsets = {}
database = None
dataset_name = None
with utils.smart_open(filepath) as soft:
groupper = groupby(soft, lambda x: x.startswith("^"))
for is_new_entry, group in groupper:
if is_new_entry:
entry_type, entry_name = __parse_entry(next(group))
logger.debug("%s: %s" % (entry_type.upper(), entry_name))
if entry_type == "SUBSET":
is_data, data_group = next(groupper)
message = ("The key is not False, probably there is an "
"error in the SOFT file")
assert not is_data, message
subset_metadata = parse_metadata(data_group)
subsets[entry_name] = GDSSubset(name=entry_name,
metadata=subset_metadata)
elif entry_type == "DATABASE":
is_data, data_group = next(groupper)
message = ("The key is not False, probably there is an "
"error in the SOFT file")
assert not is_data, message
database_metadata = parse_metadata(data_group)
database = GEODatabase(name=entry_name,
metadata=database_metadata)
elif entry_type == "DATASET":
is_data, data_group = next(groupper)
dataset_name = entry_name
for line in data_group:
dataset_lines.append(line.rstrip())
else:
logger.error("Cannot recognize type %s" % entry_type)
metadata = parse_metadata(dataset_lines)
columns = parse_GDS_columns(dataset_lines, subsets)
table = parse_table_data(dataset_lines)
return GDS(name=dataset_name, metadata=metadata, columns=columns,
table=table, subsets=subsets, database=database) | [
"def",
"parse_GDS",
"(",
"filepath",
")",
":",
"dataset_lines",
"=",
"[",
"]",
"subsets",
"=",
"{",
"}",
"database",
"=",
"None",
"dataset_name",
"=",
"None",
"with",
"utils",
".",
"smart_open",
"(",
"filepath",
")",
"as",
"soft",
":",
"groupper",
"=",
... | Parse GDS SOFT file.
Args:
filepath (:obj:`str`): Path to GDS SOFT file.
Returns:
:obj:`GEOparse.GDS`: A GDS object. | [
"Parse",
"GDS",
"SOFT",
"file",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/GEOparse.py#L549-L598 | train | 205,247 |
guma44/GEOparse | GEOparse/utils.py | download_from_url | def download_from_url(url, destination_path,
force=False, aspera=False, silent=False):
"""Download file from remote server.
If the file is already downloaded and ``force`` flag is on the file will
be removed.
Args:
url (:obj:`str`): Path to the file on remote server (including file
name)
destination_path (:obj:`str`): Path to the file on local machine
(including file name)
force (:obj:`bool`): If file exist force to overwrite it. Defaults to
False.
aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.
silent (:obj:`bool`): Do not print any message. Defaults to False.
"""
if aspera and url.startswith("http"):
logger.warn("Aspera Connect allows only FTP servers - falling back to "
"normal download")
aspera = False
try:
fn = Downloader(
url,
outdir=os.path.dirname(destination_path))
if aspera:
fn.download_aspera(
user="anonftp",
host="ftp-trace.ncbi.nlm.nih.gov",
silent=silent)
else:
fn.download(silent=silent, force=force)
except URLError:
logger.error("Cannot find file %s" % url) | python | def download_from_url(url, destination_path,
force=False, aspera=False, silent=False):
"""Download file from remote server.
If the file is already downloaded and ``force`` flag is on the file will
be removed.
Args:
url (:obj:`str`): Path to the file on remote server (including file
name)
destination_path (:obj:`str`): Path to the file on local machine
(including file name)
force (:obj:`bool`): If file exist force to overwrite it. Defaults to
False.
aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.
silent (:obj:`bool`): Do not print any message. Defaults to False.
"""
if aspera and url.startswith("http"):
logger.warn("Aspera Connect allows only FTP servers - falling back to "
"normal download")
aspera = False
try:
fn = Downloader(
url,
outdir=os.path.dirname(destination_path))
if aspera:
fn.download_aspera(
user="anonftp",
host="ftp-trace.ncbi.nlm.nih.gov",
silent=silent)
else:
fn.download(silent=silent, force=force)
except URLError:
logger.error("Cannot find file %s" % url) | [
"def",
"download_from_url",
"(",
"url",
",",
"destination_path",
",",
"force",
"=",
"False",
",",
"aspera",
"=",
"False",
",",
"silent",
"=",
"False",
")",
":",
"if",
"aspera",
"and",
"url",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"logger",
".",
... | Download file from remote server.
If the file is already downloaded and ``force`` flag is on the file will
be removed.
Args:
url (:obj:`str`): Path to the file on remote server (including file
name)
destination_path (:obj:`str`): Path to the file on local machine
(including file name)
force (:obj:`bool`): If file exist force to overwrite it. Defaults to
False.
aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.
silent (:obj:`bool`): Do not print any message. Defaults to False. | [
"Download",
"file",
"from",
"remote",
"server",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/utils.py#L40-L74 | train | 205,248 |
guma44/GEOparse | GEOparse/utils.py | smart_open | def smart_open(filepath):
"""Open file intelligently depending on the source and python version.
Args:
filepath (:obj:`str`): Path to the file.
Yields:
Context manager for file handle.
"""
if filepath[-2:] == "gz":
mode = "rt"
fopen = gzip.open
else:
mode = "r"
fopen = open
if sys.version_info[0] < 3:
fh = fopen(filepath, mode)
else:
fh = fopen(filepath, mode, errors="ignore")
try:
yield fh
except IOError:
fh.close()
finally:
fh.close() | python | def smart_open(filepath):
"""Open file intelligently depending on the source and python version.
Args:
filepath (:obj:`str`): Path to the file.
Yields:
Context manager for file handle.
"""
if filepath[-2:] == "gz":
mode = "rt"
fopen = gzip.open
else:
mode = "r"
fopen = open
if sys.version_info[0] < 3:
fh = fopen(filepath, mode)
else:
fh = fopen(filepath, mode, errors="ignore")
try:
yield fh
except IOError:
fh.close()
finally:
fh.close() | [
"def",
"smart_open",
"(",
"filepath",
")",
":",
"if",
"filepath",
"[",
"-",
"2",
":",
"]",
"==",
"\"gz\"",
":",
"mode",
"=",
"\"rt\"",
"fopen",
"=",
"gzip",
".",
"open",
"else",
":",
"mode",
"=",
"\"r\"",
"fopen",
"=",
"open",
"if",
"sys",
".",
"... | Open file intelligently depending on the source and python version.
Args:
filepath (:obj:`str`): Path to the file.
Yields:
Context manager for file handle. | [
"Open",
"file",
"intelligently",
"depending",
"on",
"the",
"source",
"and",
"python",
"version",
"."
] | 7ee8d5b8678d780382a6bf884afa69d2033f5ca0 | https://github.com/guma44/GEOparse/blob/7ee8d5b8678d780382a6bf884afa69d2033f5ca0/GEOparse/utils.py#L78-L103 | train | 205,249 |
HDI-Project/BTB | btb/selection/selector.py | Selector.bandit | def bandit(self, choice_rewards):
"""Return the choice to take next using multi-armed bandit
Multi-armed bandit method. Accepts a mapping of choices to rewards which indicate their
historical performance, and returns the choice that we should make next in order to
maximize expected reward in the long term.
The default implementation is to return the arm with the highest average score.
Args:
choice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards.
Returns:
str: the name of the choice to take next.
"""
return max(choice_rewards, key=lambda a: np.mean(choice_rewards[a])) | python | def bandit(self, choice_rewards):
"""Return the choice to take next using multi-armed bandit
Multi-armed bandit method. Accepts a mapping of choices to rewards which indicate their
historical performance, and returns the choice that we should make next in order to
maximize expected reward in the long term.
The default implementation is to return the arm with the highest average score.
Args:
choice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards.
Returns:
str: the name of the choice to take next.
"""
return max(choice_rewards, key=lambda a: np.mean(choice_rewards[a])) | [
"def",
"bandit",
"(",
"self",
",",
"choice_rewards",
")",
":",
"return",
"max",
"(",
"choice_rewards",
",",
"key",
"=",
"lambda",
"a",
":",
"np",
".",
"mean",
"(",
"choice_rewards",
"[",
"a",
"]",
")",
")"
] | Return the choice to take next using multi-armed bandit
Multi-armed bandit method. Accepts a mapping of choices to rewards which indicate their
historical performance, and returns the choice that we should make next in order to
maximize expected reward in the long term.
The default implementation is to return the arm with the highest average score.
Args:
choice_rewards (Dict[object, List[float]]): maps choice IDs to lists of rewards.
Returns:
str: the name of the choice to take next. | [
"Return",
"the",
"choice",
"to",
"take",
"next",
"using",
"multi",
"-",
"armed",
"bandit"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/selector.py#L23-L38 | train | 205,250 |
HDI-Project/BTB | btb/selection/selector.py | Selector.select | def select(self, choice_scores):
"""Select the next best choice to make
Args:
choice_scores (Dict[object, List[float]]): Mapping of choice to list of scores for each
possible choice. The caller is responsible for making sure each choice that is
possible at this juncture is represented in the dict, even those with no scores.
Score lists should be in ascending chronological order, that is, the score from the
earliest trial should be listed first.
For example::
{
1: [0.56, 0.61, 0.33, 0.67],
2: [0.25, 0.58],
3: [0.60, 0.65, 0.68],
}
"""
choice_rewards = {}
for choice, scores in choice_scores.items():
if choice not in self.choices:
continue
choice_rewards[choice] = self.compute_rewards(scores)
return self.bandit(choice_rewards) | python | def select(self, choice_scores):
"""Select the next best choice to make
Args:
choice_scores (Dict[object, List[float]]): Mapping of choice to list of scores for each
possible choice. The caller is responsible for making sure each choice that is
possible at this juncture is represented in the dict, even those with no scores.
Score lists should be in ascending chronological order, that is, the score from the
earliest trial should be listed first.
For example::
{
1: [0.56, 0.61, 0.33, 0.67],
2: [0.25, 0.58],
3: [0.60, 0.65, 0.68],
}
"""
choice_rewards = {}
for choice, scores in choice_scores.items():
if choice not in self.choices:
continue
choice_rewards[choice] = self.compute_rewards(scores)
return self.bandit(choice_rewards) | [
"def",
"select",
"(",
"self",
",",
"choice_scores",
")",
":",
"choice_rewards",
"=",
"{",
"}",
"for",
"choice",
",",
"scores",
"in",
"choice_scores",
".",
"items",
"(",
")",
":",
"if",
"choice",
"not",
"in",
"self",
".",
"choices",
":",
"continue",
"ch... | Select the next best choice to make
Args:
choice_scores (Dict[object, List[float]]): Mapping of choice to list of scores for each
possible choice. The caller is responsible for making sure each choice that is
possible at this juncture is represented in the dict, even those with no scores.
Score lists should be in ascending chronological order, that is, the score from the
earliest trial should be listed first.
For example::
{
1: [0.56, 0.61, 0.33, 0.67],
2: [0.25, 0.58],
3: [0.60, 0.65, 0.68],
} | [
"Select",
"the",
"next",
"best",
"choice",
"to",
"make"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/selector.py#L40-L65 | train | 205,251 |
HDI-Project/BTB | btb/selection/best.py | BestKReward.compute_rewards | def compute_rewards(self, scores):
"""Retain the K best scores, and replace the rest with nans"""
if len(scores) > self.k:
scores = np.copy(scores)
inds = np.argsort(scores)[:-self.k]
scores[inds] = np.nan
return list(scores) | python | def compute_rewards(self, scores):
"""Retain the K best scores, and replace the rest with nans"""
if len(scores) > self.k:
scores = np.copy(scores)
inds = np.argsort(scores)[:-self.k]
scores[inds] = np.nan
return list(scores) | [
"def",
"compute_rewards",
"(",
"self",
",",
"scores",
")",
":",
"if",
"len",
"(",
"scores",
")",
">",
"self",
".",
"k",
":",
"scores",
"=",
"np",
".",
"copy",
"(",
"scores",
")",
"inds",
"=",
"np",
".",
"argsort",
"(",
"scores",
")",
"[",
":",
... | Retain the K best scores, and replace the rest with nans | [
"Retain",
"the",
"K",
"best",
"scores",
"and",
"replace",
"the",
"rest",
"with",
"nans"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/best.py#L30-L37 | train | 205,252 |
HDI-Project/BTB | btb/selection/best.py | BestKVelocity.compute_rewards | def compute_rewards(self, scores):
"""Compute the velocity of the best scores
The velocities are the k distances between the k+1 best scores.
"""
k = self.k
m = max(len(scores) - k, 0)
best_scores = sorted(scores)[-k - 1:]
velocities = np.diff(best_scores)
nans = np.full(m, np.nan)
return list(velocities) + list(nans) | python | def compute_rewards(self, scores):
"""Compute the velocity of the best scores
The velocities are the k distances between the k+1 best scores.
"""
k = self.k
m = max(len(scores) - k, 0)
best_scores = sorted(scores)[-k - 1:]
velocities = np.diff(best_scores)
nans = np.full(m, np.nan)
return list(velocities) + list(nans) | [
"def",
"compute_rewards",
"(",
"self",
",",
"scores",
")",
":",
"k",
"=",
"self",
".",
"k",
"m",
"=",
"max",
"(",
"len",
"(",
"scores",
")",
"-",
"k",
",",
"0",
")",
"best_scores",
"=",
"sorted",
"(",
"scores",
")",
"[",
"-",
"k",
"-",
"1",
"... | Compute the velocity of the best scores
The velocities are the k distances between the k+1 best scores. | [
"Compute",
"the",
"velocity",
"of",
"the",
"best",
"scores"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/best.py#L71-L81 | train | 205,253 |
HDI-Project/BTB | btb/tuning/gp.py | GPEiVelocity.predict | def predict(self, X):
"""
Use the POU value we computed in fit to choose randomly between GPEi and
uniform random selection.
"""
if np.random.random() < self.POU:
# choose params at random to avoid local minima
return Uniform(self.tunables).predict(X)
return super(GPEiVelocity, self).predict(X) | python | def predict(self, X):
"""
Use the POU value we computed in fit to choose randomly between GPEi and
uniform random selection.
"""
if np.random.random() < self.POU:
# choose params at random to avoid local minima
return Uniform(self.tunables).predict(X)
return super(GPEiVelocity, self).predict(X) | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"if",
"np",
".",
"random",
".",
"random",
"(",
")",
"<",
"self",
".",
"POU",
":",
"# choose params at random to avoid local minima",
"return",
"Uniform",
"(",
"self",
".",
"tunables",
")",
".",
"predict",
... | Use the POU value we computed in fit to choose randomly between GPEi and
uniform random selection. | [
"Use",
"the",
"POU",
"value",
"we",
"computed",
"in",
"fit",
"to",
"choose",
"randomly",
"between",
"GPEi",
"and",
"uniform",
"random",
"selection",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/gp.py#L117-L126 | train | 205,254 |
HDI-Project/BTB | btb/selection/recent.py | RecentKReward.compute_rewards | def compute_rewards(self, scores):
"""Retain the K most recent scores, and replace the rest with zeros"""
for i in range(len(scores)):
if i >= self.k:
scores[i] = 0.
return scores | python | def compute_rewards(self, scores):
"""Retain the K most recent scores, and replace the rest with zeros"""
for i in range(len(scores)):
if i >= self.k:
scores[i] = 0.
return scores | [
"def",
"compute_rewards",
"(",
"self",
",",
"scores",
")",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"scores",
")",
")",
":",
"if",
"i",
">=",
"self",
".",
"k",
":",
"scores",
"[",
"i",
"]",
"=",
"0.",
"return",
"scores"
] | Retain the K most recent scores, and replace the rest with zeros | [
"Retain",
"the",
"K",
"most",
"recent",
"scores",
"and",
"replace",
"the",
"rest",
"with",
"zeros"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/recent.py#L24-L29 | train | 205,255 |
HDI-Project/BTB | btb/selection/recent.py | RecentKReward.select | def select(self, choice_scores):
"""Use the top k learner's scores for usage in rewards for the bandit calculation"""
# if we don't have enough scores to do K-selection, fall back to UCB1
min_num_scores = min([len(s) for s in choice_scores.values()])
if min_num_scores >= K_MIN:
logger.info('{klass}: using Best K bandit selection'.format(klass=type(self).__name__))
reward_func = self.compute_rewards
else:
logger.warning(
'{klass}: Not enough choices to do K-selection; using plain UCB1'
.format(klass=type(self).__name__))
reward_func = super(RecentKReward, self).compute_rewards
choice_rewards = {}
for choice, scores in choice_scores.items():
if choice not in self.choices:
continue
choice_rewards[choice] = reward_func(scores)
return self.bandit(choice_rewards) | python | def select(self, choice_scores):
"""Use the top k learner's scores for usage in rewards for the bandit calculation"""
# if we don't have enough scores to do K-selection, fall back to UCB1
min_num_scores = min([len(s) for s in choice_scores.values()])
if min_num_scores >= K_MIN:
logger.info('{klass}: using Best K bandit selection'.format(klass=type(self).__name__))
reward_func = self.compute_rewards
else:
logger.warning(
'{klass}: Not enough choices to do K-selection; using plain UCB1'
.format(klass=type(self).__name__))
reward_func = super(RecentKReward, self).compute_rewards
choice_rewards = {}
for choice, scores in choice_scores.items():
if choice not in self.choices:
continue
choice_rewards[choice] = reward_func(scores)
return self.bandit(choice_rewards) | [
"def",
"select",
"(",
"self",
",",
"choice_scores",
")",
":",
"# if we don't have enough scores to do K-selection, fall back to UCB1",
"min_num_scores",
"=",
"min",
"(",
"[",
"len",
"(",
"s",
")",
"for",
"s",
"in",
"choice_scores",
".",
"values",
"(",
")",
"]",
... | Use the top k learner's scores for usage in rewards for the bandit calculation | [
"Use",
"the",
"top",
"k",
"learner",
"s",
"scores",
"for",
"usage",
"in",
"rewards",
"for",
"the",
"bandit",
"calculation"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/recent.py#L31-L50 | train | 205,256 |
HDI-Project/BTB | btb/selection/recent.py | RecentKVelocity.compute_rewards | def compute_rewards(self, scores):
"""Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
"""
# take the k + 1 most recent scores so we can get k velocities
recent_scores = scores[:-self.k - 2:-1]
velocities = [recent_scores[i] - recent_scores[i + 1] for i in
range(len(recent_scores) - 1)]
# pad the list out with zeros, so the length of the list is
# maintained
zeros = (len(scores) - self.k) * [0]
return velocities + zeros | python | def compute_rewards(self, scores):
"""Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
"""
# take the k + 1 most recent scores so we can get k velocities
recent_scores = scores[:-self.k - 2:-1]
velocities = [recent_scores[i] - recent_scores[i + 1] for i in
range(len(recent_scores) - 1)]
# pad the list out with zeros, so the length of the list is
# maintained
zeros = (len(scores) - self.k) * [0]
return velocities + zeros | [
"def",
"compute_rewards",
"(",
"self",
",",
"scores",
")",
":",
"# take the k + 1 most recent scores so we can get k velocities",
"recent_scores",
"=",
"scores",
"[",
":",
"-",
"self",
".",
"k",
"-",
"2",
":",
"-",
"1",
"]",
"velocities",
"=",
"[",
"recent_score... | Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same. | [
"Compute",
"the",
"velocity",
"of",
"thte",
"k",
"+",
"1",
"most",
"recent",
"scores",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/recent.py#L56-L69 | train | 205,257 |
HDI-Project/BTB | btb/selection/hierarchical.py | HierarchicalByAlgorithm.select | def select(self, choice_scores):
"""
Groups the frozen sets by algorithm and first chooses an algorithm based
on the traditional UCB1 criteria.
Next, from that algorithm's frozen sets, makes the final set choice.
"""
# choose algorithm using a bandit
alg_scores = {}
for algorithm, choices in self.by_algorithm.items():
# only make arms for algorithms that have options
if not set(choices) & set(choice_scores.keys()):
continue
# sum up lists to get a list of all the scores from any run of this
# algorithm
sublists = [choice_scores.get(c, []) for c in choices]
alg_scores[algorithm] = sum(sublists, [])
best_algorithm = self.bandit(alg_scores)
# now use only the frozen sets from the chosen algorithm
best_subset = self.by_algorithm[best_algorithm]
normal_ucb1 = UCB1(choices=best_subset)
return normal_ucb1.select(choice_scores) | python | def select(self, choice_scores):
"""
Groups the frozen sets by algorithm and first chooses an algorithm based
on the traditional UCB1 criteria.
Next, from that algorithm's frozen sets, makes the final set choice.
"""
# choose algorithm using a bandit
alg_scores = {}
for algorithm, choices in self.by_algorithm.items():
# only make arms for algorithms that have options
if not set(choices) & set(choice_scores.keys()):
continue
# sum up lists to get a list of all the scores from any run of this
# algorithm
sublists = [choice_scores.get(c, []) for c in choices]
alg_scores[algorithm] = sum(sublists, [])
best_algorithm = self.bandit(alg_scores)
# now use only the frozen sets from the chosen algorithm
best_subset = self.by_algorithm[best_algorithm]
normal_ucb1 = UCB1(choices=best_subset)
return normal_ucb1.select(choice_scores) | [
"def",
"select",
"(",
"self",
",",
"choice_scores",
")",
":",
"# choose algorithm using a bandit",
"alg_scores",
"=",
"{",
"}",
"for",
"algorithm",
",",
"choices",
"in",
"self",
".",
"by_algorithm",
".",
"items",
"(",
")",
":",
"# only make arms for algorithms tha... | Groups the frozen sets by algorithm and first chooses an algorithm based
on the traditional UCB1 criteria.
Next, from that algorithm's frozen sets, makes the final set choice. | [
"Groups",
"the",
"frozen",
"sets",
"by",
"algorithm",
"and",
"first",
"chooses",
"an",
"algorithm",
"based",
"on",
"the",
"traditional",
"UCB1",
"criteria",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/selection/hierarchical.py#L15-L38 | train | 205,258 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner._generate_grid | def _generate_grid(self):
"""Get the all possible values for each of the tunables."""
grid_axes = []
for _, param in self.tunables:
grid_axes.append(param.get_grid_axis(self.grid_width))
return grid_axes | python | def _generate_grid(self):
"""Get the all possible values for each of the tunables."""
grid_axes = []
for _, param in self.tunables:
grid_axes.append(param.get_grid_axis(self.grid_width))
return grid_axes | [
"def",
"_generate_grid",
"(",
"self",
")",
":",
"grid_axes",
"=",
"[",
"]",
"for",
"_",
",",
"param",
"in",
"self",
".",
"tunables",
":",
"grid_axes",
".",
"append",
"(",
"param",
".",
"get_grid_axis",
"(",
"self",
".",
"grid_width",
")",
")",
"return"... | Get the all possible values for each of the tunables. | [
"Get",
"the",
"all",
"possible",
"values",
"for",
"each",
"of",
"the",
"tunables",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L44-L50 | train | 205,259 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner._candidates_from_grid | def _candidates_from_grid(self, n=1000):
"""Get unused candidates from the grid or parameters."""
used_vectors = set(tuple(v) for v in self.X)
# if every point has been used before, gridding is done.
grid_size = self.grid_width ** len(self.tunables)
if len(used_vectors) == grid_size:
return None
all_vectors = set(itertools.product(*self._grid_axes))
remaining_vectors = all_vectors - used_vectors
candidates = np.array(list(map(np.array, remaining_vectors)))
np.random.shuffle(candidates)
return candidates[0:n] | python | def _candidates_from_grid(self, n=1000):
"""Get unused candidates from the grid or parameters."""
used_vectors = set(tuple(v) for v in self.X)
# if every point has been used before, gridding is done.
grid_size = self.grid_width ** len(self.tunables)
if len(used_vectors) == grid_size:
return None
all_vectors = set(itertools.product(*self._grid_axes))
remaining_vectors = all_vectors - used_vectors
candidates = np.array(list(map(np.array, remaining_vectors)))
np.random.shuffle(candidates)
return candidates[0:n] | [
"def",
"_candidates_from_grid",
"(",
"self",
",",
"n",
"=",
"1000",
")",
":",
"used_vectors",
"=",
"set",
"(",
"tuple",
"(",
"v",
")",
"for",
"v",
"in",
"self",
".",
"X",
")",
"# if every point has been used before, gridding is done.",
"grid_size",
"=",
"self"... | Get unused candidates from the grid or parameters. | [
"Get",
"unused",
"candidates",
"from",
"the",
"grid",
"or",
"parameters",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L62-L76 | train | 205,260 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner._random_candidates | def _random_candidates(self, n=1000):
"""Generate a matrix of random parameters, column by column."""
candidates = np.zeros((n, len(self.tunables)))
for i, tunable in enumerate(self.tunables):
param = tunable[1]
lo, hi = param.range
if param.is_integer:
column = np.random.randint(lo, hi + 1, size=n)
else:
diff = hi - lo
column = lo + diff * np.random.rand(n)
candidates[:, i] = column
return candidates | python | def _random_candidates(self, n=1000):
"""Generate a matrix of random parameters, column by column."""
candidates = np.zeros((n, len(self.tunables)))
for i, tunable in enumerate(self.tunables):
param = tunable[1]
lo, hi = param.range
if param.is_integer:
column = np.random.randint(lo, hi + 1, size=n)
else:
diff = hi - lo
column = lo + diff * np.random.rand(n)
candidates[:, i] = column
return candidates | [
"def",
"_random_candidates",
"(",
"self",
",",
"n",
"=",
"1000",
")",
":",
"candidates",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"len",
"(",
"self",
".",
"tunables",
")",
")",
")",
"for",
"i",
",",
"tunable",
"in",
"enumerate",
"(",
"self",
... | Generate a matrix of random parameters, column by column. | [
"Generate",
"a",
"matrix",
"of",
"random",
"parameters",
"column",
"by",
"column",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L78-L94 | train | 205,261 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner._create_candidates | def _create_candidates(self, n=1000):
"""Generate random hyperparameter vectors
Args:
n (int, optional): number of candidates to generate. Defaults to 1000.
Returns:
candidates (np.array): Array of candidate hyperparameter vectors with shape
(n_samples, len(tunables))
"""
# If using a grid, generate a list of previously unused grid points
if self.grid:
return self._candidates_from_grid(n)
# If not using a grid, generate a list of vectors where each parameter
# is chosen uniformly at random
else:
return self._random_candidates(n) | python | def _create_candidates(self, n=1000):
"""Generate random hyperparameter vectors
Args:
n (int, optional): number of candidates to generate. Defaults to 1000.
Returns:
candidates (np.array): Array of candidate hyperparameter vectors with shape
(n_samples, len(tunables))
"""
# If using a grid, generate a list of previously unused grid points
if self.grid:
return self._candidates_from_grid(n)
# If not using a grid, generate a list of vectors where each parameter
# is chosen uniformly at random
else:
return self._random_candidates(n) | [
"def",
"_create_candidates",
"(",
"self",
",",
"n",
"=",
"1000",
")",
":",
"# If using a grid, generate a list of previously unused grid points",
"if",
"self",
".",
"grid",
":",
"return",
"self",
".",
"_candidates_from_grid",
"(",
"n",
")",
"# If not using a grid, gener... | Generate random hyperparameter vectors
Args:
n (int, optional): number of candidates to generate. Defaults to 1000.
Returns:
candidates (np.array): Array of candidate hyperparameter vectors with shape
(n_samples, len(tunables)) | [
"Generate",
"random",
"hyperparameter",
"vectors"
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L96-L113 | train | 205,262 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner.propose | def propose(self, n=1):
"""Use the trained model to propose a new set of parameters.
Args:
n (int, optional): number of candidates to propose
Returns:
Mapping of tunable name to proposed value. If called with n>1 then proposal is a list
of dictionaries.
"""
proposed_params = []
for i in range(n):
# generate a list of random candidate vectors. If self.grid == True
# each candidate will be a vector that has not been used before.
candidate_params = self._create_candidates()
# create_candidates() returns None when every grid point
# has been tried
if candidate_params is None:
return None
# predict() returns a tuple of predicted values for each candidate
predictions = self.predict(candidate_params)
# acquire() evaluates the list of predictions, selects one,
# and returns its index.
idx = self._acquire(predictions)
# inverse transform acquired hyperparameters
# based on hyparameter type
params = {}
for i in range(candidate_params[idx, :].shape[0]):
inverse_transformed = self.tunables[i][1].inverse_transform(
candidate_params[idx, i]
)
params[self.tunables[i][0]] = inverse_transformed
proposed_params.append(params)
return params if n == 1 else proposed_params | python | def propose(self, n=1):
"""Use the trained model to propose a new set of parameters.
Args:
n (int, optional): number of candidates to propose
Returns:
Mapping of tunable name to proposed value. If called with n>1 then proposal is a list
of dictionaries.
"""
proposed_params = []
for i in range(n):
# generate a list of random candidate vectors. If self.grid == True
# each candidate will be a vector that has not been used before.
candidate_params = self._create_candidates()
# create_candidates() returns None when every grid point
# has been tried
if candidate_params is None:
return None
# predict() returns a tuple of predicted values for each candidate
predictions = self.predict(candidate_params)
# acquire() evaluates the list of predictions, selects one,
# and returns its index.
idx = self._acquire(predictions)
# inverse transform acquired hyperparameters
# based on hyparameter type
params = {}
for i in range(candidate_params[idx, :].shape[0]):
inverse_transformed = self.tunables[i][1].inverse_transform(
candidate_params[idx, i]
)
params[self.tunables[i][0]] = inverse_transformed
proposed_params.append(params)
return params if n == 1 else proposed_params | [
"def",
"propose",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"proposed_params",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n",
")",
":",
"# generate a list of random candidate vectors. If self.grid == True",
"# each candidate will be a vector that has not been used... | Use the trained model to propose a new set of parameters.
Args:
n (int, optional): number of candidates to propose
Returns:
Mapping of tunable name to proposed value. If called with n>1 then proposal is a list
of dictionaries. | [
"Use",
"the",
"trained",
"model",
"to",
"propose",
"a",
"new",
"set",
"of",
"parameters",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L140-L179 | train | 205,263 |
HDI-Project/BTB | btb/tuning/tuner.py | BaseTuner.add | def add(self, X, y):
"""Add data about known tunable hyperparameter configurations and scores.
Refits model with all data.
Args:
X (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of
hyperparameter combinations. Keys may only be the name of a tunable, and the
dictionary must contain values for all tunables.
y (Union[float, List[float]]): float or list of floats of scores of the hyperparameter
combinations. Order of scores must match the order of the hyperparameter
dictionaries that the scores corresponds
"""
if isinstance(X, dict):
X = [X]
y = [y]
# transform the list of dictionaries into a np array X_raw
for i in range(len(X)):
each = X[i]
# update best score and hyperparameters
if y[i] > self._best_score:
self._best_score = y[i]
self._best_hyperparams = X[i]
vectorized = []
for tunable in self.tunables:
vectorized.append(each[tunable[0]])
if self.X_raw is not None:
self.X_raw = np.append(
self.X_raw,
np.array([vectorized], dtype=object),
axis=0,
)
else:
self.X_raw = np.array([vectorized], dtype=object)
self.y_raw = np.append(self.y_raw, y)
# transforms each hyperparameter based on hyperparameter type
x_transformed = np.array([], dtype=np.float64)
if len(self.X_raw.shape) > 1 and self.X_raw.shape[1] > 0:
x_transformed = self.tunables[0][1].fit_transform(
self.X_raw[:, 0],
self.y_raw,
).astype(float)
for i in range(1, self.X_raw.shape[1]):
transformed = self.tunables[i][1].fit_transform(
self.X_raw[:, i],
self.y_raw,
).astype(float)
x_transformed = np.column_stack((x_transformed, transformed))
self.fit(x_transformed, self.y_raw) | python | def add(self, X, y):
"""Add data about known tunable hyperparameter configurations and scores.
Refits model with all data.
Args:
X (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of
hyperparameter combinations. Keys may only be the name of a tunable, and the
dictionary must contain values for all tunables.
y (Union[float, List[float]]): float or list of floats of scores of the hyperparameter
combinations. Order of scores must match the order of the hyperparameter
dictionaries that the scores corresponds
"""
if isinstance(X, dict):
X = [X]
y = [y]
# transform the list of dictionaries into a np array X_raw
for i in range(len(X)):
each = X[i]
# update best score and hyperparameters
if y[i] > self._best_score:
self._best_score = y[i]
self._best_hyperparams = X[i]
vectorized = []
for tunable in self.tunables:
vectorized.append(each[tunable[0]])
if self.X_raw is not None:
self.X_raw = np.append(
self.X_raw,
np.array([vectorized], dtype=object),
axis=0,
)
else:
self.X_raw = np.array([vectorized], dtype=object)
self.y_raw = np.append(self.y_raw, y)
# transforms each hyperparameter based on hyperparameter type
x_transformed = np.array([], dtype=np.float64)
if len(self.X_raw.shape) > 1 and self.X_raw.shape[1] > 0:
x_transformed = self.tunables[0][1].fit_transform(
self.X_raw[:, 0],
self.y_raw,
).astype(float)
for i in range(1, self.X_raw.shape[1]):
transformed = self.tunables[i][1].fit_transform(
self.X_raw[:, i],
self.y_raw,
).astype(float)
x_transformed = np.column_stack((x_transformed, transformed))
self.fit(x_transformed, self.y_raw) | [
"def",
"add",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"dict",
")",
":",
"X",
"=",
"[",
"X",
"]",
"y",
"=",
"[",
"y",
"]",
"# transform the list of dictionaries into a np array X_raw",
"for",
"i",
"in",
"range",
"... | Add data about known tunable hyperparameter configurations and scores.
Refits model with all data.
Args:
X (Union[Dict[str, object], List[Dict[str, object]]]): dict or list of dicts of
hyperparameter combinations. Keys may only be the name of a tunable, and the
dictionary must contain values for all tunables.
y (Union[float, List[float]]): float or list of floats of scores of the hyperparameter
combinations. Order of scores must match the order of the hyperparameter
dictionaries that the scores corresponds | [
"Add",
"data",
"about",
"known",
"tunable",
"hyperparameter",
"configurations",
"and",
"scores",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/tuner.py#L181-L237 | train | 205,264 |
HDI-Project/BTB | btb/recommendation/recommender.py | BaseRecommender._get_candidates | def _get_candidates(self):
"""Finds the pipelines that are not yet tried.
Returns:
np.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on
``X``. ``None`` if all pipelines have been tried on X.
"""
candidates = np.where(self.dpp_vector == 0)
return None if len(candidates[0]) == 0 else candidates[0] | python | def _get_candidates(self):
"""Finds the pipelines that are not yet tried.
Returns:
np.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on
``X``. ``None`` if all pipelines have been tried on X.
"""
candidates = np.where(self.dpp_vector == 0)
return None if len(candidates[0]) == 0 else candidates[0] | [
"def",
"_get_candidates",
"(",
"self",
")",
":",
"candidates",
"=",
"np",
".",
"where",
"(",
"self",
".",
"dpp_vector",
"==",
"0",
")",
"return",
"None",
"if",
"len",
"(",
"candidates",
"[",
"0",
"]",
")",
"==",
"0",
"else",
"candidates",
"[",
"0",
... | Finds the pipelines that are not yet tried.
Returns:
np.array: Indices corresponding to columns in ``dpp_matrix`` that haven't been tried on
``X``. ``None`` if all pipelines have been tried on X. | [
"Finds",
"the",
"pipelines",
"that",
"are",
"not",
"yet",
"tried",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/recommendation/recommender.py#L59-L67 | train | 205,265 |
HDI-Project/BTB | btb/recommendation/recommender.py | BaseRecommender.propose | def propose(self):
"""Use the trained model to propose a new pipeline.
Returns:
int: Index corresponding to pipeline to try in ``dpp_matrix``.
"""
# generate a list of all the untried candidate pipelines
candidates = self._get_candidates()
# get_candidates() returns None when every possibility has been tried
if candidates is None:
return None
# predict() returns a predicted values for each candidate
predictions = self.predict(candidates)
# acquire() evaluates the list of predictions, selects one, and returns
# its index.
idx = self._acquire(predictions)
return candidates[idx] | python | def propose(self):
"""Use the trained model to propose a new pipeline.
Returns:
int: Index corresponding to pipeline to try in ``dpp_matrix``.
"""
# generate a list of all the untried candidate pipelines
candidates = self._get_candidates()
# get_candidates() returns None when every possibility has been tried
if candidates is None:
return None
# predict() returns a predicted values for each candidate
predictions = self.predict(candidates)
# acquire() evaluates the list of predictions, selects one, and returns
# its index.
idx = self._acquire(predictions)
return candidates[idx] | [
"def",
"propose",
"(",
"self",
")",
":",
"# generate a list of all the untried candidate pipelines",
"candidates",
"=",
"self",
".",
"_get_candidates",
"(",
")",
"# get_candidates() returns None when every possibility has been tried",
"if",
"candidates",
"is",
"None",
":",
"r... | Use the trained model to propose a new pipeline.
Returns:
int: Index corresponding to pipeline to try in ``dpp_matrix``. | [
"Use",
"the",
"trained",
"model",
"to",
"propose",
"a",
"new",
"pipeline",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/recommendation/recommender.py#L69-L88 | train | 205,266 |
HDI-Project/BTB | btb/recommendation/recommender.py | BaseRecommender.add | def add(self, X):
"""Add data about known pipeline and scores.
Updates ``dpp_vector`` and refits model with all data.
Args:
X (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a
column in ``dpp_matrix`` and values are the corresponding score for pipeline on
the dataset.
"""
for each in X:
self.dpp_vector[each] = X[each]
self.fit(self.dpp_vector.reshape(1, -1)) | python | def add(self, X):
"""Add data about known pipeline and scores.
Updates ``dpp_vector`` and refits model with all data.
Args:
X (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a
column in ``dpp_matrix`` and values are the corresponding score for pipeline on
the dataset.
"""
for each in X:
self.dpp_vector[each] = X[each]
self.fit(self.dpp_vector.reshape(1, -1)) | [
"def",
"add",
"(",
"self",
",",
"X",
")",
":",
"for",
"each",
"in",
"X",
":",
"self",
".",
"dpp_vector",
"[",
"each",
"]",
"=",
"X",
"[",
"each",
"]",
"self",
".",
"fit",
"(",
"self",
".",
"dpp_vector",
".",
"reshape",
"(",
"1",
",",
"-",
"1"... | Add data about known pipeline and scores.
Updates ``dpp_vector`` and refits model with all data.
Args:
X (dict): mapping of pipeline indices to scores. Keys must correspond to the index of a
column in ``dpp_matrix`` and values are the corresponding score for pipeline on
the dataset. | [
"Add",
"data",
"about",
"known",
"pipeline",
"and",
"scores",
"."
] | 7f489ebc5591bd0886652ef743098c022d7f7460 | https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/recommendation/recommender.py#L90-L102 | train | 205,267 |
gc3-uzh-ch/elasticluster | elasticluster/providers/azure_provider.py | AzureCloudProvider._init_az_api | def _init_az_api(self):
"""
Initialise client objects for talking to Azure API.
This is in a separate function so to be called by ``__init__``
and ``__setstate__``.
"""
with self.__lock:
if self._resource_client is None:
log.debug("Making Azure `ServicePrincipalcredentials` object"
" with tenant=%r, client_id=%r, secret=%r ...",
self.tenant_id, self.client_id,
('<redacted>' if self.secret else None))
credentials = ServicePrincipalCredentials(
tenant=self.tenant_id,
client_id=self.client_id,
secret=self.secret,
)
log.debug("Initializing Azure `ComputeManagementclient` ...")
self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
log.debug("Initializing Azure `NetworkManagementclient` ...")
self._network_client = NetworkManagementClient(credentials, self.subscription_id)
log.debug("Initializing Azure `ResourceManagementclient` ...")
self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
log.info("Azure API clients initialized.") | python | def _init_az_api(self):
"""
Initialise client objects for talking to Azure API.
This is in a separate function so to be called by ``__init__``
and ``__setstate__``.
"""
with self.__lock:
if self._resource_client is None:
log.debug("Making Azure `ServicePrincipalcredentials` object"
" with tenant=%r, client_id=%r, secret=%r ...",
self.tenant_id, self.client_id,
('<redacted>' if self.secret else None))
credentials = ServicePrincipalCredentials(
tenant=self.tenant_id,
client_id=self.client_id,
secret=self.secret,
)
log.debug("Initializing Azure `ComputeManagementclient` ...")
self._compute_client = ComputeManagementClient(credentials, self.subscription_id)
log.debug("Initializing Azure `NetworkManagementclient` ...")
self._network_client = NetworkManagementClient(credentials, self.subscription_id)
log.debug("Initializing Azure `ResourceManagementclient` ...")
self._resource_client = ResourceManagementClient(credentials, self.subscription_id)
log.info("Azure API clients initialized.") | [
"def",
"_init_az_api",
"(",
"self",
")",
":",
"with",
"self",
".",
"__lock",
":",
"if",
"self",
".",
"_resource_client",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"\"Making Azure `ServicePrincipalcredentials` object\"",
"\" with tenant=%r, client_id=%r, secret=%r ...... | Initialise client objects for talking to Azure API.
This is in a separate function so to be called by ``__init__``
and ``__setstate__``. | [
"Initialise",
"client",
"objects",
"for",
"talking",
"to",
"Azure",
"API",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/azure_provider.py#L179-L203 | train | 205,268 |
gc3-uzh-ch/elasticluster | elasticluster/providers/azure_provider.py | AzureCloudProvider.start_instance | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username='root',
node_name=None,
boot_disk_size=30,
storage_account_type='Standard_LRS',
**extra):
"""
Start a new VM using the given properties.
:param str key_name:
**unused in Azure**, only present for interface compatibility
:param str public_key_path:
path to ssh public key to authorize on the VM (for user `username`, see below)
:param str private_key_path:
**unused in Azure**, only present for interface compatibility
:param str security_group:
network security group to attach VM to, **currently unused**
:param str flavor:
machine type to use for the instance
:param str image_id:
disk image to use for the instance;
has the form *publisher/offer/sku/version*
(e.g., ``canonical/ubuntuserver/16.04.0-LTS/latest``)
:param str image_userdata:
command to execute after startup, **currently unused**
:param int boot_disk_size:
size of boot disk to use; values are specified in gigabytes.
:param str username:
username for the given ssh key
(default is ``root`` as it's always guaranteed to exist,
but you probably don't want to use that)
:param str storage_account_type:
Type of disks to attach to the VM. For a list of valid values,
see: https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes
:return: tuple[str, str] -- resource group and node name of the started VM
"""
self._init_az_api()
# Warn of unsupported parameters, if set. We do not warn
# about `user_key` or `private_key_path` since they come from
# a `[login/*]` section and those can be shared across
# different cloud providers.
if security_group and security_group != 'default':
warn("Setting `security_group` is currently not supported"
" in the Azure cloud; VMs will all be attached to"
" a network security group named after the cluster name.")
if image_userdata:
warn("Parameter `image_userdata` is currently not supported"
" in the Azure cloud and will be ignored.")
# Use the cluster name to identify the Azure resource group;
# however, `Node.cluster_name` is not passed down here so
# extract it from the node name, which always contains it as
# the substring before the leftmost dash (see `cluster.py`,
# line 1182)
cluster_name, _ = node_name.split('-', 1)
with self.__lock:
if cluster_name not in self._resource_groups_created:
self._resource_client.resource_groups.create_or_update(
cluster_name, {'location': self.location})
self._resource_groups_created.add(cluster_name)
# read public SSH key
with open(public_key_path, 'r') as public_key_file:
public_key = public_key_file.read()
image_publisher, image_offer, \
image_sku, image_version = self._split_image_id(image_id)
if not security_group:
security_group = (cluster_name + '-secgroup')
net_parameters = {
'networkSecurityGroupName': {
'value': security_group,
},
'subnetName': { 'value': cluster_name },
}
net_name = net_parameters['subnetName']['value']
with self.__lock:
if net_name not in self._networks_created:
log.debug(
"Creating network `%s` in Azure ...", net_name)
oper = self._resource_client.deployments.create_or_update(
cluster_name, net_name, {
'mode': DeploymentMode.incremental,
'template': self.net_deployment_template,
'parameters': net_parameters,
})
oper.wait()
self._networks_created.add(net_name)
boot_disk_size_gb = int(boot_disk_size)
vm_parameters = {
'adminUserName': { 'value': username },
'imagePublisher': { 'value': image_publisher }, # e.g., 'canonical'
'imageOffer': { 'value': image_offer }, # e.g., ubuntuserver
'imageSku': { 'value': image_sku }, # e.g., '16.04.0-LTS'
'imageVersion': { 'value': image_version }, # e.g., 'latest'
'networkSecurityGroupName': {
'value': security_group,
},
'sshKeyData': { 'value': public_key },
'storageAccountName': {
'value': self._make_storage_account_name(
cluster_name, node_name)
},
'storageAccountType': { 'value': storage_account_type },
'subnetName': { 'value': cluster_name },
'vmName': { 'value': node_name },
'vmSize': { 'value': flavor },
'bootDiskSize': { 'value': boot_disk_size_gb}
}
log.debug(
"Deploying `%s` VM template to Azure ...",
vm_parameters['vmName']['value'])
oper = self._resource_client.deployments.create_or_update(
cluster_name, node_name, {
'mode': DeploymentMode.incremental,
'template': self.vm_deployment_template,
'parameters': vm_parameters,
})
oper.wait()
# the `instance_id` is a composite type since we need both the
# resource group name and the vm name to uniquely identify a VM
return [cluster_name, node_name] | python | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username='root',
node_name=None,
boot_disk_size=30,
storage_account_type='Standard_LRS',
**extra):
"""
Start a new VM using the given properties.
:param str key_name:
**unused in Azure**, only present for interface compatibility
:param str public_key_path:
path to ssh public key to authorize on the VM (for user `username`, see below)
:param str private_key_path:
**unused in Azure**, only present for interface compatibility
:param str security_group:
network security group to attach VM to, **currently unused**
:param str flavor:
machine type to use for the instance
:param str image_id:
disk image to use for the instance;
has the form *publisher/offer/sku/version*
(e.g., ``canonical/ubuntuserver/16.04.0-LTS/latest``)
:param str image_userdata:
command to execute after startup, **currently unused**
:param int boot_disk_size:
size of boot disk to use; values are specified in gigabytes.
:param str username:
username for the given ssh key
(default is ``root`` as it's always guaranteed to exist,
but you probably don't want to use that)
:param str storage_account_type:
Type of disks to attach to the VM. For a list of valid values,
see: https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes
:return: tuple[str, str] -- resource group and node name of the started VM
"""
self._init_az_api()
# Warn of unsupported parameters, if set. We do not warn
# about `user_key` or `private_key_path` since they come from
# a `[login/*]` section and those can be shared across
# different cloud providers.
if security_group and security_group != 'default':
warn("Setting `security_group` is currently not supported"
" in the Azure cloud; VMs will all be attached to"
" a network security group named after the cluster name.")
if image_userdata:
warn("Parameter `image_userdata` is currently not supported"
" in the Azure cloud and will be ignored.")
# Use the cluster name to identify the Azure resource group;
# however, `Node.cluster_name` is not passed down here so
# extract it from the node name, which always contains it as
# the substring before the leftmost dash (see `cluster.py`,
# line 1182)
cluster_name, _ = node_name.split('-', 1)
with self.__lock:
if cluster_name not in self._resource_groups_created:
self._resource_client.resource_groups.create_or_update(
cluster_name, {'location': self.location})
self._resource_groups_created.add(cluster_name)
# read public SSH key
with open(public_key_path, 'r') as public_key_file:
public_key = public_key_file.read()
image_publisher, image_offer, \
image_sku, image_version = self._split_image_id(image_id)
if not security_group:
security_group = (cluster_name + '-secgroup')
net_parameters = {
'networkSecurityGroupName': {
'value': security_group,
},
'subnetName': { 'value': cluster_name },
}
net_name = net_parameters['subnetName']['value']
with self.__lock:
if net_name not in self._networks_created:
log.debug(
"Creating network `%s` in Azure ...", net_name)
oper = self._resource_client.deployments.create_or_update(
cluster_name, net_name, {
'mode': DeploymentMode.incremental,
'template': self.net_deployment_template,
'parameters': net_parameters,
})
oper.wait()
self._networks_created.add(net_name)
boot_disk_size_gb = int(boot_disk_size)
vm_parameters = {
'adminUserName': { 'value': username },
'imagePublisher': { 'value': image_publisher }, # e.g., 'canonical'
'imageOffer': { 'value': image_offer }, # e.g., ubuntuserver
'imageSku': { 'value': image_sku }, # e.g., '16.04.0-LTS'
'imageVersion': { 'value': image_version }, # e.g., 'latest'
'networkSecurityGroupName': {
'value': security_group,
},
'sshKeyData': { 'value': public_key },
'storageAccountName': {
'value': self._make_storage_account_name(
cluster_name, node_name)
},
'storageAccountType': { 'value': storage_account_type },
'subnetName': { 'value': cluster_name },
'vmName': { 'value': node_name },
'vmSize': { 'value': flavor },
'bootDiskSize': { 'value': boot_disk_size_gb}
}
log.debug(
"Deploying `%s` VM template to Azure ...",
vm_parameters['vmName']['value'])
oper = self._resource_client.deployments.create_or_update(
cluster_name, node_name, {
'mode': DeploymentMode.incremental,
'template': self.vm_deployment_template,
'parameters': vm_parameters,
})
oper.wait()
# the `instance_id` is a composite type since we need both the
# resource group name and the vm name to uniquely identify a VM
return [cluster_name, node_name] | [
"def",
"start_instance",
"(",
"self",
",",
"key_name",
",",
"public_key_path",
",",
"private_key_path",
",",
"security_group",
",",
"flavor",
",",
"image_id",
",",
"image_userdata",
",",
"username",
"=",
"'root'",
",",
"node_name",
"=",
"None",
",",
"boot_disk_s... | Start a new VM using the given properties.
:param str key_name:
**unused in Azure**, only present for interface compatibility
:param str public_key_path:
path to ssh public key to authorize on the VM (for user `username`, see below)
:param str private_key_path:
**unused in Azure**, only present for interface compatibility
:param str security_group:
network security group to attach VM to, **currently unused**
:param str flavor:
machine type to use for the instance
:param str image_id:
disk image to use for the instance;
has the form *publisher/offer/sku/version*
(e.g., ``canonical/ubuntuserver/16.04.0-LTS/latest``)
:param str image_userdata:
command to execute after startup, **currently unused**
:param int boot_disk_size:
size of boot disk to use; values are specified in gigabytes.
:param str username:
username for the given ssh key
(default is ``root`` as it's always guaranteed to exist,
but you probably don't want to use that)
:param str storage_account_type:
Type of disks to attach to the VM. For a list of valid values,
see: https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#diskstorageaccounttypes
:return: tuple[str, str] -- resource group and node name of the started VM | [
"Start",
"a",
"new",
"VM",
"using",
"the",
"given",
"properties",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/azure_provider.py#L206-L334 | train | 205,269 |
gc3-uzh-ch/elasticluster | elasticluster/providers/azure_provider.py | AzureCloudProvider.is_instance_running | def is_instance_running(self, instance_id):
"""
Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
self._init_az_api()
# Here, it's always better if we update the instance.
vm = self._get_vm(instance_id, force_reload=True)
# FIXME: should we rather check `vm.instance_view.statuses`
# and search for `.code == "PowerState/running"`? or
# `vm.instance_view.vm_agent.statuses` and search for `.code
# == 'ProvisioningState/suceeded'`?
return vm.provisioning_state == u'Succeeded' | python | def is_instance_running(self, instance_id):
"""
Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise
"""
self._init_az_api()
# Here, it's always better if we update the instance.
vm = self._get_vm(instance_id, force_reload=True)
# FIXME: should we rather check `vm.instance_view.statuses`
# and search for `.code == "PowerState/running"`? or
# `vm.instance_view.vm_agent.statuses` and search for `.code
# == 'ProvisioningState/suceeded'`?
return vm.provisioning_state == u'Succeeded' | [
"def",
"is_instance_running",
"(",
"self",
",",
"instance_id",
")",
":",
"self",
".",
"_init_az_api",
"(",
")",
"# Here, it's always better if we update the instance.",
"vm",
"=",
"self",
".",
"_get_vm",
"(",
"instance_id",
",",
"force_reload",
"=",
"True",
")",
"... | Check if the instance is up and running.
:param str instance_id: instance identifier
:return: bool - True if running, False otherwise | [
"Check",
"if",
"the",
"instance",
"is",
"up",
"and",
"running",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/azure_provider.py#L428-L443 | train | 205,270 |
gc3-uzh-ch/elasticluster | elasticluster/providers/azure_provider.py | AzureCloudProvider._get_vm | def _get_vm(self, instance_id, force_reload=True):
"""
Return details on the VM with the given name.
:param str node_name: instance identifier
:param bool force_reload:
if ``True``, skip searching caches and reload instance from server
and immediately reload instance data from cloud provider
:return: py:class:`novaclient.v1_1.servers.Server` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud.
"""
self._init_az_api()
if force_reload:
# Remove from cache and get from server again
self._inventory = {}
cluster_name, node_name = instance_id
self._init_inventory(cluster_name)
# if instance is known, return it
if node_name not in self._vm_details:
vm_info = self._compute_client.virtual_machines.get(
cluster_name, node_name, 'instanceView')
self._vm_details[node_name] = vm_info
try:
return self._vm_details[node_name]
except KeyError:
raise InstanceNotFoundError(
"Instance `{instance_id}` not found"
.format(instance_id=instance_id)) | python | def _get_vm(self, instance_id, force_reload=True):
"""
Return details on the VM with the given name.
:param str node_name: instance identifier
:param bool force_reload:
if ``True``, skip searching caches and reload instance from server
and immediately reload instance data from cloud provider
:return: py:class:`novaclient.v1_1.servers.Server` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud.
"""
self._init_az_api()
if force_reload:
# Remove from cache and get from server again
self._inventory = {}
cluster_name, node_name = instance_id
self._init_inventory(cluster_name)
# if instance is known, return it
if node_name not in self._vm_details:
vm_info = self._compute_client.virtual_machines.get(
cluster_name, node_name, 'instanceView')
self._vm_details[node_name] = vm_info
try:
return self._vm_details[node_name]
except KeyError:
raise InstanceNotFoundError(
"Instance `{instance_id}` not found"
.format(instance_id=instance_id)) | [
"def",
"_get_vm",
"(",
"self",
",",
"instance_id",
",",
"force_reload",
"=",
"True",
")",
":",
"self",
".",
"_init_az_api",
"(",
")",
"if",
"force_reload",
":",
"# Remove from cache and get from server again",
"self",
".",
"_inventory",
"=",
"{",
"}",
"cluster_n... | Return details on the VM with the given name.
:param str node_name: instance identifier
:param bool force_reload:
if ``True``, skip searching caches and reload instance from server
and immediately reload instance data from cloud provider
:return: py:class:`novaclient.v1_1.servers.Server` - instance
:raises: `InstanceError` is returned if the instance can't
be found in the local cache or in the cloud. | [
"Return",
"details",
"on",
"the",
"VM",
"with",
"the",
"given",
"name",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/azure_provider.py#L445-L475 | train | 205,271 |
def inspect_node(node):
    """
    Connect to an `elasticluster.cluster.Node` over SSH and probe it.

    Detects which batch system (SLURM, SGE, PBS) is installed on the
    node, records the machine architecture, and -- for SLURM/SGE --
    gathers additional cluster details.  Returns a dict with the
    collected information, or ``None`` when the SSH connection fails.
    """
    ssh = node.connect()
    if not ssh:
        log.error("Unable to connect to node %s", node.name)
        return
    info = {}
    # Probe for a known scheduler command; fall back to 'UNKNOWN'.
    _in, _out, _err = ssh.exec_command("(type >& /dev/null -a srun && echo slurm) \
        || (type >& /dev/null -a qconf && echo sge) \
        || (type >& /dev/null -a pbsnodes && echo pbs) \
        || echo UNKNOWN")
    info['type'] = _out.read().strip()
    _in, _out, _err = ssh.exec_command("arch")
    info['architecture'] = _out.read().strip()
    # SLURM and SGE expose extra details worth collecting.
    batch_type = info['type']
    if batch_type == 'slurm':
        inspect_slurm_cluster(ssh, info)
    elif batch_type == 'sge':
        inspect_sge_cluster(ssh, info)
    ssh.close()
    return info
"""
This function accept a `elasticluster.cluster.Node` class,
connects to a node and tries to discover the kind of batch system
installed, and some other information.
"""
node_information = {}
ssh = node.connect()
if not ssh:
log.error("Unable to connect to node %s", node.name)
return
(_in, _out, _err) = ssh.exec_command("(type >& /dev/null -a srun && echo slurm) \
|| (type >& /dev/null -a qconf && echo sge) \
|| (type >& /dev/null -a pbsnodes && echo pbs) \
|| echo UNKNOWN")
node_information['type'] = _out.read().strip()
(_in, _out, _err) = ssh.exec_command("arch")
node_information['architecture'] = _out.read().strip()
if node_information['type'] == 'slurm':
inspect_slurm_cluster(ssh, node_information)
elif node_information['type'] == 'sge':
inspect_sge_cluster(ssh, node_information)
ssh.close()
return node_information | [
"def",
"inspect_node",
"(",
"node",
")",
":",
"node_information",
"=",
"{",
"}",
"ssh",
"=",
"node",
".",
"connect",
"(",
")",
"if",
"not",
"ssh",
":",
"log",
".",
"error",
"(",
"\"Unable to connect to node %s\"",
",",
"node",
".",
"name",
")",
"return",... | This function accept a `elasticluster.cluster.Node` class,
connects to a node and tries to discover the kind of batch system
installed, and some other information. | [
"This",
"function",
"accept",
"a",
"elasticluster",
".",
"cluster",
".",
"Node",
"class",
"connects",
"to",
"a",
"node",
"and",
"tries",
"to",
"discover",
"the",
"kind",
"of",
"batch",
"system",
"installed",
"and",
"some",
"other",
"information",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/gc3pie_config.py#L183-L209 | train | 205,272 |
def create_gc3pie_config_snippet(cluster):
    """
    Create a configuration file snippet to be used with GC3Pie.

    Connects to the cluster frontend, inspects it (batch system type,
    architecture, resource limits) and renders an ``auth/`` and a
    ``resource/`` section suitable for inclusion in a GC3Pie
    configuration file.

    :param cluster: `elasticluster.cluster.Cluster` to inspect.
    :return: str -- the rendered configuration snippet.
    """
    auth_section = 'auth/elasticluster_%s' % cluster.name
    resource_section = 'resource/elasticluster_%s' % cluster.name

    cfg = RawConfigParser()
    cfg.add_section(auth_section)

    frontend_node = cluster.get_ssh_to_node()
    cfg.set(auth_section, 'type', 'ssh')
    cfg.set(auth_section, 'username', frontend_node.image_user)

    cluster_info = inspect_node(frontend_node)
    cfg.add_section(resource_section)
    cfg.set(resource_section, 'enabled', 'yes')
    cfg.set(resource_section, 'transport', 'ssh')
    cfg.set(resource_section, 'frontend', frontend_node.preferred_ip)
    if not cluster_info:
        # Inspection failed (e.g. no SSH access); fall back to
        # placeholders so the generated snippet is still complete.
        # (Fixed typo/grammar in the original message:
        # "Following informatino are only partial!")
        log.error("Unable to gather enough information from the cluster. "
                  "Following information is only partial!")
        cluster_info = {'architecture': 'unknown',
                        'type': 'unknown',
                        'max_cores': -1,
                        'max_cores_per_job': -1,
                        'max_memory_per_core': -1,
                        'max_walltime': '672hours'}
    cfg.set(resource_section, 'type', cluster_info['type'])
    cfg.set(resource_section, 'architecture', cluster_info['architecture'])
    # Use sensible defaults for any limit the inspection did not report.
    cfg.set(resource_section, 'max_cores', cluster_info.get('max_cores', 1))
    cfg.set(resource_section, 'max_cores_per_job', cluster_info.get('max_cores_per_job', 1))
    cfg.set(resource_section, 'max_memory_per_core', cluster_info.get('max_memory_per_core', '2GB'))
    cfg.set(resource_section, 'max_walltime', cluster_info.get('max_walltime', '672hours'))

    cfgstring = StringIO()
    cfg.write(cfgstring)
    return cfgstring.getvalue()
"""
Create a configuration file snippet to be used with GC3Pie.
"""
auth_section = 'auth/elasticluster_%s' % cluster.name
resource_section = 'resource/elasticluster_%s' % cluster.name
cfg = RawConfigParser()
cfg.add_section(auth_section)
frontend_node = cluster.get_ssh_to_node()
cfg.set(auth_section, 'type', 'ssh')
cfg.set(auth_section, 'username', frontend_node.image_user)
cluster_info = inspect_node(frontend_node)
cfg.add_section(resource_section)
cfg.set(resource_section, 'enabled', 'yes')
cfg.set(resource_section, 'transport', 'ssh')
cfg.set(resource_section, 'frontend', frontend_node.preferred_ip)
if not cluster_info:
log.error("Unable to gather enough information from the cluster. "
"Following informatino are only partial!")
cluster_info = {'architecture': 'unknown',
'type': 'unknown',
'max_cores': -1,
'max_cores_per_job': -1,
'max_memory_per_core': -1,
'max_walltime': '672hours'}
cfg.set(resource_section, 'type', cluster_info['type'])
cfg.set(resource_section, 'architecture', cluster_info['architecture'])
cfg.set(resource_section, 'max_cores', cluster_info.get('max_cores', 1))
cfg.set(resource_section, 'max_cores_per_job', cluster_info.get('max_cores_per_job', 1))
cfg.set(resource_section, 'max_memory_per_core', cluster_info.get('max_memory_per_core', '2GB'))
cfg.set(resource_section, 'max_walltime', cluster_info.get('max_walltime', '672hours'))
cfgstring = StringIO()
cfg.write(cfgstring)
return cfgstring.getvalue() | [
"def",
"create_gc3pie_config_snippet",
"(",
"cluster",
")",
":",
"auth_section",
"=",
"'auth/elasticluster_%s'",
"%",
"cluster",
".",
"name",
"resource_section",
"=",
"'resource/elasticluster_%s'",
"%",
"cluster",
".",
"name",
"cfg",
"=",
"RawConfigParser",
"(",
")",
... | Create a configuration file snippet to be used with GC3Pie. | [
"Create",
"a",
"configuration",
"file",
"snippet",
"to",
"be",
"used",
"with",
"GC3Pie",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/gc3pie_config.py#L211-L250 | train | 205,273 |
def _execute_request(self, request):
    """
    Execute a GCE API `request` and return its result.

    All requests are serialized through the class-wide lock so that
    multiple API calls are never in flight at the same time.
    """
    # Serialize: fire at most one GCE request at a time.
    with GoogleCloudProvider.__gce_lock:
        result = request.execute(http=self._auth_http)
    return result
"""Helper method to execute a request, since a lock should be used
to not fire up multiple requests at the same time.
:return: Result of `request.execute`
"""
with GoogleCloudProvider.__gce_lock:
return request.execute(http=self._auth_http) | [
"def",
"_execute_request",
"(",
"self",
",",
"request",
")",
":",
"with",
"GoogleCloudProvider",
".",
"__gce_lock",
":",
"return",
"request",
".",
"execute",
"(",
"http",
"=",
"self",
".",
"_auth_http",
")"
] | Helper method to execute a request, since a lock should be used
to not fire up multiple requests at the same time.
:return: Result of `request.execute` | [
"Helper",
"method",
"to",
"execute",
"a",
"request",
"since",
"a",
"lock",
"should",
"be",
"used",
"to",
"not",
"fire",
"up",
"multiple",
"requests",
"at",
"the",
"same",
"time",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L201-L208 | train | 205,274 |
def _wait_until_done(self, response, wait=30):
    """
    Poll a GCE operation until its status becomes ``DONE``.

    :param response: response object from a previous GCE call.
    :param int wait: sleep up to this many seconds (randomized)
        between successive status polls.
    :return: the final response object (or the last falsy response
        received from the server).
    """
    gce = self._connect()
    status = response['status']
    while response and status != 'DONE':
        # Randomize the polling interval to avoid hammering the API.
        if wait:
            time.sleep(1 + random.randrange(wait))
        operation_id = response['name']
        if 'zone' in response:
            # Per-zone resource: poll the zone-scoped operations API.
            zone_name = response['zone'].split('/')[-1]
            request = gce.zoneOperations().get(
                project=self._project_id, operation=operation_id,
                zone=zone_name)
        else:
            request = gce.globalOperations().get(
                project=self._project_id,
                operation=operation_id)
        response = self._execute_request(request)
        if response:
            status = response['status']
    return response
"""Blocks until the operation status is done for the given operation.
:param response: The response object used in a previous GCE call.
:param int wait: Wait up to this number of seconds in between
successive polling of the GCE status.
"""
gce = self._connect()
status = response['status']
while status != 'DONE' and response:
# wait a random amount of time (up to `wait` seconds)
if wait:
time.sleep(1 + random.randrange(wait))
operation_id = response['name']
# Identify if this is a per-zone resource
if 'zone' in response:
zone_name = response['zone'].split('/')[-1]
request = gce.zoneOperations().get(
project=self._project_id, operation=operation_id,
zone=zone_name)
else:
request = gce.globalOperations().get(
project=self._project_id,
operation=operation_id)
response = self._execute_request(request)
if response:
status = response['status']
return response | [
"def",
"_wait_until_done",
"(",
"self",
",",
"response",
",",
"wait",
"=",
"30",
")",
":",
"gce",
"=",
"self",
".",
"_connect",
"(",
")",
"status",
"=",
"response",
"[",
"'status'",
"]",
"while",
"status",
"!=",
"'DONE'",
"and",
"response",
":",
"# wai... | Blocks until the operation status is done for the given operation.
:param response: The response object used in a previous GCE call.
:param int wait: Wait up to this number of seconds in between
successive polling of the GCE status. | [
"Blocks",
"until",
"the",
"operation",
"status",
"is",
"done",
"for",
"the",
"given",
"operation",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L213-L246 | train | 205,275 |
def pause_instance(self, instance_id):
    """Pauses the instance, retaining disk and config.

    :param str instance_id: instance identifier
    :raises: `InstanceError` if instance cannot be paused
    :return: dict - information needed to restart instance.
    """
    if not instance_id:
        log.info("Instance to pause has no instance id.")
        return
    gce = self._connect()
    try:
        request = gce.instances().stop(project=self._project_id,
                                       instance=instance_id,
                                       zone=self._zone)
        operation = self._execute_request(request)
        response = self._wait_until_done(operation)
        self._check_response(response)
        return {"instance_id": instance_id}
    except HttpError as e:
        # Interpolate the error into the message ourselves: exception
        # classes do not apply '%'-style formatting to their arguments,
        # so the original two-argument `raise` never formatted `e` in.
        log.error("Error stopping instance: `%s`", e)
        raise InstanceError("Error stopping instance `%s`" % e)
"""Pauses the instance, retaining disk and config.
:param str instance_id: instance identifier
:raises: `InstanceError` if instance cannot be paused
:return: dict - information needed to restart instance.
"""
if not instance_id:
log.info("Instance to pause has no instance id.")
return
gce = self._connect()
try:
request = gce.instances().stop(project=self._project_id,
instance=instance_id,
zone=self._zone)
operation = self._execute_request(request)
response = self._wait_until_done(operation)
self._check_response(response)
return {"instance_id": instance_id}
except HttpError as e:
log.error("Error stopping instance: `%s", e)
raise InstanceError("Error stopping instance `%s`", e) | [
"def",
"pause_instance",
"(",
"self",
",",
"instance_id",
")",
":",
"if",
"not",
"instance_id",
":",
"log",
".",
"info",
"(",
"\"Instance to pause has no instance id.\"",
")",
"return",
"gce",
"=",
"self",
".",
"_connect",
"(",
")",
"try",
":",
"request",
"=... | Pauses the instance, retaining disk and config.
:param str instance_id: instance identifier
:raises: `InstanceError` if instance cannot be paused
:return: dict - information needed to restart instance. | [
"Pauses",
"the",
"instance",
"retaining",
"disk",
"and",
"config",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L532-L557 | train | 205,276 |
def resume_instance(self, paused_info):
    """Restarts a paused instance, retaining disk and config.

    :param dict paused_info: data returned by :py:meth:`pause_instance`;
        must contain the key ``"instance_id"``.
    :raises: `InstanceError` if instance cannot be resumed.
    :return: ``None``
    """
    if not paused_info.get("instance_id"):
        log.info("Instance to stop has no instance id.")
        return
    gce = self._connect()
    try:
        request = gce.instances().start(project=self._project_id,
                                        instance=paused_info["instance_id"],
                                        zone=self._zone)
        operation = self._execute_request(request)
        response = self._wait_until_done(operation)
        self._check_response(response)
        return
    except HttpError as e:
        # Interpolate the error into the message ourselves: exception
        # classes do not apply '%'-style formatting to their arguments,
        # so the original two-argument `raise` never formatted `e` in.
        log.error("Error restarting instance: `%s`", e)
        raise InstanceError("Error restarting instance `%s`" % e)
"""Restarts a paused instance, retaining disk and config.
:param str instance_id: instance identifier
:raises: `InstanceError` if instance cannot be resumed.
:return: dict - information needed to restart instance.
"""
if not paused_info.get("instance_id"):
log.info("Instance to stop has no instance id.")
return
gce = self._connect()
try:
request = gce.instances().start(project=self._project_id,
instance=paused_info["instance_id"],
zone=self._zone)
operation = self._execute_request(request)
response = self._wait_until_done(operation)
self._check_response(response)
return
except HttpError as e:
log.error("Error restarting instance: `%s", e)
raise InstanceError("Error restarting instance `%s`", e) | [
"def",
"resume_instance",
"(",
"self",
",",
"paused_info",
")",
":",
"if",
"not",
"paused_info",
".",
"get",
"(",
"\"instance_id\"",
")",
":",
"log",
".",
"info",
"(",
"\"Instance to stop has no instance id.\"",
")",
"return",
"gce",
"=",
"self",
".",
"_connec... | Restarts a paused instance, retaining disk and config.
:param str instance_id: instance identifier
:raises: `InstanceError` if instance cannot be resumed.
:return: dict - information needed to restart instance. | [
"Restarts",
"a",
"paused",
"instance",
"retaining",
"disk",
"and",
"config",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L559-L584 | train | 205,277 |
def list_instances(self, filter=None):
    """List instances on GCE, optionally filtering the results.

    :param str filter: Filter specification; see
        https://developers.google.com/compute/docs/reference/latest/instances/list
        for details.
    :return: list of instances (empty when none match)
    :raises: `InstanceError` if the instance list cannot be retrieved.
    """
    gce = self._connect()
    try:
        request = gce.instances().list(
            project=self._project_id, filter=filter, zone=self._zone)
        response = self._execute_request(request)
        self._check_response(response)
    except (HttpError, CloudProviderError) as e:
        # The original format string had no `%s` placeholder, which made
        # this line raise `TypeError` instead of `InstanceError`.
        raise InstanceError(
            "could not retrieve all instances on the cloud: `%s`" % e)
    if response and 'items' in response:
        return response['items']
    else:
        return []
"""List instances on GCE, optionally filtering the results.
:param str filter: Filter specification; see https://developers.google.com/compute/docs/reference/latest/instances/list for details.
:return: list of instances
"""
gce = self._connect()
try:
request = gce.instances().list(
project=self._project_id, filter=filter, zone=self._zone)
response = self._execute_request(request)
self._check_response(response)
except (HttpError, CloudProviderError) as e:
raise InstanceError("could not retrieve all instances on the "
"cloud: ``" % e)
if response and 'items' in response:
return response['items']
else:
return list() | [
"def",
"list_instances",
"(",
"self",
",",
"filter",
"=",
"None",
")",
":",
"gce",
"=",
"self",
".",
"_connect",
"(",
")",
"try",
":",
"request",
"=",
"gce",
".",
"instances",
"(",
")",
".",
"list",
"(",
"project",
"=",
"self",
".",
"_project_id",
... | List instances on GCE, optionally filtering the results.
:param str filter: Filter specification; see https://developers.google.com/compute/docs/reference/latest/instances/list for details.
:return: list of instances | [
"List",
"instances",
"on",
"GCE",
"optionally",
"filtering",
"the",
"results",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L618-L638 | train | 205,278 |
def is_instance_running(self, instance_id):
    """Check whether the instance is up and running.

    :param str instance_id: instance identifier
    :return: True if instance is running, False otherwise
    """
    matches = self.list_instances(filter=('name eq "%s"' % instance_id))
    # A name may match several records; any RUNNING one counts.
    return any(entry['status'] == 'RUNNING' for entry in matches)
"""Check whether the instance is up and running.
:param str instance_id: instance identifier
:reutrn: True if instance is running, False otherwise
"""
items = self.list_instances(filter=('name eq "%s"' % instance_id))
for item in items:
if item['status'] == 'RUNNING':
return True
return False | [
"def",
"is_instance_running",
"(",
"self",
",",
"instance_id",
")",
":",
"items",
"=",
"self",
".",
"list_instances",
"(",
"filter",
"=",
"(",
"'name eq \"%s\"'",
"%",
"instance_id",
")",
")",
"for",
"item",
"in",
"items",
":",
"if",
"item",
"[",
"'status'... | Check whether the instance is up and running.
:param str instance_id: instance identifier
:reutrn: True if instance is running, False otherwise | [
"Check",
"whether",
"the",
"instance",
"is",
"up",
"and",
"running",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/gce.py#L684-L694 | train | 205,279 |
def __init_keystone_session(self):
    """
    Create and return a Keystone session object.

    Honors an explicitly configured identity API version; when none is
    configured, probes v3 first and then v2.  Raises `RuntimeError`
    when no session can be established.
    """
    api = self._identity_api_version  # for readability
    attempts = []
    # `None` means "not configured": probe each API version in turn.
    probe = (api is None)
    if api == '3' or probe:
        session = self.__init_keystone_session_v3(check=probe)
        attempts.append('v3')
        if session:
            return session
    if api == '2' or probe:
        session = self.__init_keystone_session_v2(check=probe)
        attempts.append('v2')
        if session:
            return session
    raise RuntimeError(
        "Cannot establish Keystone session (tried: {0})."
        .format(', '.join(attempts)))
"""Create and return a Keystone session object."""
api = self._identity_api_version # for readability
tried = []
if api in ['3', None]:
sess = self.__init_keystone_session_v3(check=(api is None))
tried.append('v3')
if sess:
return sess
if api in ['2', None]:
sess = self.__init_keystone_session_v2(check=(api is None))
tried.append('v2')
if sess:
return sess
raise RuntimeError(
"Cannot establish Keystone session (tried: {0})."
.format(', '.join(tried))) | [
"def",
"__init_keystone_session",
"(",
"self",
")",
":",
"api",
"=",
"self",
".",
"_identity_api_version",
"# for readability",
"tried",
"=",
"[",
"]",
"if",
"api",
"in",
"[",
"'3'",
",",
"None",
"]",
":",
"sess",
"=",
"self",
".",
"__init_keystone_session_v... | Create and return a Keystone session object. | [
"Create",
"and",
"return",
"a",
"Keystone",
"session",
"object",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L314-L330 | train | 205,280 |
def __init_keystone_session_v2(self, check=False):
    """
    Create and return a session object using Keystone API v2.

    :param bool check: when true, validate the session by issuing a
        trivial Nova API call before returning it; return ``None`` if
        the validation fails.
    """
    from keystoneauth1 import loading as keystone_v2
    plugin = keystone_v2.get_plugin_loader('password')
    credentials = plugin.load_from_options(
        auth_url=self._os_auth_url,
        username=self._os_username,
        password=self._os_password,
        project_name=self._os_tenant_name,
    )
    session = keystoneauth1.session.Session(auth=credentials,
                                            verify=self._os_cacert)
    if check:
        log.debug("Checking that Keystone API v2 session works...")
        try:
            # An invalid session makes the following calls raise.
            nova = nova_client.Client(self._compute_api_version,
                                      session=session,
                                      cacert=self._os_cacert)
            nova.flavors.list()
        except keystoneauth1.exceptions.NotFound as err:
            log.warning("Creating Keystone v2 session failed: %s", err)
            return None
        except keystoneauth1.exceptions.ClientException as err:
            log.error("OpenStack server rejected request (likely configuration error?): %s", err)
            return None  # FIXME: should we be raising an error instead?
    # if we got to this point, v2 session is valid
    log.info("Using Keystone API v2 session to authenticate to OpenStack")
    return session
"""Create and return a session object using Keystone API v2."""
from keystoneauth1 import loading as keystone_v2
loader = keystone_v2.get_plugin_loader('password')
auth = loader.load_from_options(
auth_url=self._os_auth_url,
username=self._os_username,
password=self._os_password,
project_name=self._os_tenant_name,
)
sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
if check:
log.debug("Checking that Keystone API v2 session works...")
try:
# if session is invalid, the following will raise some exception
nova = nova_client.Client(self._compute_api_version, session=sess, cacert=self._os_cacert)
nova.flavors.list()
except keystoneauth1.exceptions.NotFound as err:
log.warning("Creating Keystone v2 session failed: %s", err)
return None
except keystoneauth1.exceptions.ClientException as err:
log.error("OpenStack server rejected request (likely configuration error?): %s", err)
return None # FIXME: should we be raising an error instead?
# if we got to this point, v2 session is valid
log.info("Using Keystone API v2 session to authenticate to OpenStack")
return sess | [
"def",
"__init_keystone_session_v2",
"(",
"self",
",",
"check",
"=",
"False",
")",
":",
"from",
"keystoneauth1",
"import",
"loading",
"as",
"keystone_v2",
"loader",
"=",
"keystone_v2",
".",
"get_plugin_loader",
"(",
"'password'",
")",
"auth",
"=",
"loader",
".",... | Create and return a session object using Keystone API v2. | [
"Create",
"and",
"return",
"a",
"session",
"object",
"using",
"Keystone",
"API",
"v2",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L332-L357 | train | 205,281 |
def __init_keystone_session_v3(self, check=False):
    """
    Return a new session object, created using Keystone API v3.

    :param bool check: when true, validate the session by issuing a
        trivial Nova API call before returning it; return ``None`` if
        the validation fails.

    .. note::

      Note that the only supported authN method is password authentication;
      token or other plug-ins are not currently supported.
    """
    try:
        # may fail on Python 2.6?
        from keystoneauth1.identity import v3 as keystone_v3
    except ImportError:
        log.warning("Cannot load Keystone API v3 library.")
        return None
    auth = keystone_v3.Password(
        auth_url=self._os_auth_url,
        username=self._os_username,
        password=self._os_password,
        user_domain_name=self._os_user_domain_name,
        project_domain_name=self._os_project_domain_name,
        project_name=self._os_tenant_name,
    )
    sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
    if check:
        log.debug("Checking that Keystone API v3 session works...")
        try:
            # if session is invalid, the following will raise some exception
            # Pass `cacert` as well, for consistency with the v2 code path
            # (otherwise a custom CA bundle would be ignored here).
            nova = nova_client.Client(self._compute_api_version, session=sess,
                                      cacert=self._os_cacert)
            nova.flavors.list()
        except keystoneauth1.exceptions.NotFound as err:
            log.warning("Creating Keystone v3 session failed: %s", err)
            return None
        except keystoneauth1.exceptions.ClientException as err:
            log.error("OpenStack server rejected request (likely configuration error?): %s", err)
            return None  # FIXME: should we be raising an error instead?
    # if we got to this point, v3 session is valid
    log.info("Using Keystone API v3 session to authenticate to OpenStack")
    return sess
"""
Return a new session object, created using Keystone API v3.
.. note::
Note that the only supported authN method is password authentication;
token or other plug-ins are not currently supported.
"""
try:
# may fail on Python 2.6?
from keystoneauth1.identity import v3 as keystone_v3
except ImportError:
log.warning("Cannot load Keystone API v3 library.")
return None
auth = keystone_v3.Password(
auth_url=self._os_auth_url,
username=self._os_username,
password=self._os_password,
user_domain_name=self._os_user_domain_name,
project_domain_name=self._os_project_domain_name,
project_name=self._os_tenant_name,
)
sess = keystoneauth1.session.Session(auth=auth, verify=self._os_cacert)
if check:
log.debug("Checking that Keystone API v3 session works...")
try:
# if session is invalid, the following will raise some exception
nova = nova_client.Client(self._compute_api_version, session=sess)
nova.flavors.list()
except keystoneauth1.exceptions.NotFound as err:
log.warning("Creating Keystone v3 session failed: %s", err)
return None
except keystoneauth1.exceptions.ClientException as err:
log.error("OpenStack server rejected request (likely configuration error?): %s", err)
return None # FIXME: should we be raising an error instead?
# if we got to this point, v3 session is valid
log.info("Using Keystone API v3 session to authenticate to OpenStack")
return sess | [
"def",
"__init_keystone_session_v3",
"(",
"self",
",",
"check",
"=",
"False",
")",
":",
"try",
":",
"# may fail on Python 2.6?",
"from",
"keystoneauth1",
".",
"identity",
"import",
"v3",
"as",
"keystone_v3",
"except",
"ImportError",
":",
"log",
".",
"warning",
"... | Return a new session object, created using Keystone API v3.
.. note::
Note that the only supported authN method is password authentication;
token or other plug-ins are not currently supported. | [
"Return",
"a",
"new",
"session",
"object",
"created",
"using",
"Keystone",
"API",
"v3",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L359-L397 | train | 205,282 |
def _check_security_groups(self, names):
    """
    Raise an exception if any of the named security groups does not exist.

    :param List[str] names: List of security group names
    :raises: `SecurityGroupError` if any group does not exist
    :return: ``True`` when every group exists.
    """
    self._init_os_api()
    log.debug("Checking existence of security group(s) %s ...", names)
    try:
        # python-novaclient < 8.0.0
        defined = set(sg.name for sg in self.nova_client.security_groups.list())
    except AttributeError:
        groups = self.neutron_client.list_security_groups()['security_groups']
        defined = set(sg[u'name'] for sg in groups)
    # TODO: We should be able to create the security group if it
    # doesn't exist and at least add a rule to accept ssh access.
    # Also, we should be able to add new rules to a security group
    # if needed.
    missing = set(names) - defined
    if missing:
        raise SecurityGroupError(
            "Security group(s) `{0}` do not exist"
            .format(', '.join(missing)))
    # if we get to this point, all sec groups exist
    return True
"""
Raise an exception if any of the named security groups does not exist.
:param List[str] groups: List of security group names
:raises: `SecurityGroupError` if group does not exist
"""
self._init_os_api()
log.debug("Checking existence of security group(s) %s ...", names)
try:
# python-novaclient < 8.0.0
security_groups = self.nova_client.security_groups.list()
existing = set(sg.name for sg in security_groups)
except AttributeError:
security_groups = self.neutron_client.list_security_groups()['security_groups']
existing = set(sg[u'name'] for sg in security_groups)
# TODO: We should be able to create the security group if it
# doesn't exist and at least add a rule to accept ssh access.
# Also, we should be able to add new rules to a security group
# if needed.
nonexisting = set(names) - existing
if nonexisting:
raise SecurityGroupError(
"Security group(s) `{0}` do not exist"
.format(', '.join(nonexisting)))
# if we get to this point, all sec groups exist
return True | [
"def",
"_check_security_groups",
"(",
"self",
",",
"names",
")",
":",
"self",
".",
"_init_os_api",
"(",
")",
"log",
".",
"debug",
"(",
"\"Checking existence of security group(s) %s ...\"",
",",
"names",
")",
"try",
":",
"# python-novaclient < 8.0.0",
"security_groups"... | Raise an exception if any of the named security groups does not exist.
:param List[str] groups: List of security group names
:raises: `SecurityGroupError` if group does not exist | [
"Raise",
"an",
"exception",
"if",
"any",
"of",
"the",
"named",
"security",
"groups",
"does",
"not",
"exist",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L720-L748 | train | 205,283 |
def _get_images(self):
    """
    Return the list of images available on this cloud.

    Works with both old (``nova``) and new (``glance``) client APIs;
    results come back as a plain Python list either way.
    """
    self._init_os_api()
    try:
        # python-novaclient < 8.0.0 exposes an image listing directly
        return self.nova_client.images.list()
    except AttributeError:
        # newer clients: `glance_client.images.list()` yields a
        # generator, but callers of `_get_images()` expect a list
        return list(self.glance_client.images.list())
"""Get available images. We cache the results in order to reduce
network usage.
"""
self._init_os_api()
try:
# python-novaclient < 8.0.0
return self.nova_client.images.list()
except AttributeError:
# ``glance_client.images.list()`` returns a generator, but callers
# of `._get_images()` expect a Python list
return list(self.glance_client.images.list()) | [
"def",
"_get_images",
"(",
"self",
")",
":",
"self",
".",
"_init_os_api",
"(",
")",
"try",
":",
"# python-novaclient < 8.0.0",
"return",
"self",
".",
"nova_client",
".",
"images",
".",
"list",
"(",
")",
"except",
"AttributeError",
":",
"# ``glance_client.images.... | Get available images. We cache the results in order to reduce
network usage. | [
"Get",
"available",
"images",
".",
"We",
"cache",
"the",
"results",
"in",
"order",
"to",
"reduce",
"network",
"usage",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/openstack.py#L751-L763 | train | 205,284 |
def main(self):
    """
    Main entry point of the ElastiCluster CLI.

    Dispatches to the sub-command selected on the command line; on
    failure, logs the error (with a traceback at high verbosity) and
    exits with status 1.
    """
    params = self.params
    assert params.func, "No subcommand defined in `ElastiCluster.main()"
    try:
        return params.func()
    except Exception as err:
        log.error("Error: %s", err)
        if params.verbose > 2:
            # Very verbose runs also get the full traceback.
            import traceback
            traceback.print_exc()
        print("Aborting because of errors: {err}.".format(err=err))
        sys.exit(1)
"""
This is the main entry point of the ElastiCluster CLI.
First the central configuration is created, which can be altered
through the command line interface. Then the given command from
the command line interface is called.
"""
assert self.params.func, "No subcommand defined in `ElastiCluster.main()"
try:
return self.params.func()
except Exception as err:
log.error("Error: %s", err)
if self.params.verbose > 2:
import traceback
traceback.print_exc()
print("Aborting because of errors: {err}.".format(err=err))
sys.exit(1) | [
"def",
"main",
"(",
"self",
")",
":",
"assert",
"self",
".",
"params",
".",
"func",
",",
"\"No subcommand defined in `ElastiCluster.main()\"",
"try",
":",
"return",
"self",
".",
"params",
".",
"func",
"(",
")",
"except",
"Exception",
"as",
"err",
":",
"log",... | This is the main entry point of the ElastiCluster CLI.
First the central configuration is created, which can be altered
through the command line interface. Then the given command from
the command line interface is called. | [
"This",
"is",
"the",
"main",
"entry",
"point",
"of",
"the",
"ElastiCluster",
"CLI",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/__main__.py#L196-L213 | train | 205,285 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | confirm_or_abort | def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args):
"""
Prompt user for confirmation and exit on negative reply.
Arguments `prompt` and `extra_args` will be passed unchanged to
`click.confirm`:func: (which is used for actual prompting).
:param str prompt: Prompt string to display.
:param int exitcode: Program exit code if negative reply given.
:param str msg: Message to display before exiting.
"""
if click.confirm(prompt, **extra_args):
return True
else:
# abort
if msg:
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(exitcode) | python | def confirm_or_abort(prompt, exitcode=os.EX_TEMPFAIL, msg=None, **extra_args):
"""
Prompt user for confirmation and exit on negative reply.
Arguments `prompt` and `extra_args` will be passed unchanged to
`click.confirm`:func: (which is used for actual prompting).
:param str prompt: Prompt string to display.
:param int exitcode: Program exit code if negative reply given.
:param str msg: Message to display before exiting.
"""
if click.confirm(prompt, **extra_args):
return True
else:
# abort
if msg:
sys.stderr.write(msg)
sys.stderr.write('\n')
sys.exit(exitcode) | [
"def",
"confirm_or_abort",
"(",
"prompt",
",",
"exitcode",
"=",
"os",
".",
"EX_TEMPFAIL",
",",
"msg",
"=",
"None",
",",
"*",
"*",
"extra_args",
")",
":",
"if",
"click",
".",
"confirm",
"(",
"prompt",
",",
"*",
"*",
"extra_args",
")",
":",
"return",
"... | Prompt user for confirmation and exit on negative reply.
Arguments `prompt` and `extra_args` will be passed unchanged to
`click.confirm`:func: (which is used for actual prompting).
:param str prompt: Prompt string to display.
:param int exitcode: Program exit code if negative reply given.
:param str msg: Message to display before exiting. | [
"Prompt",
"user",
"for",
"confirmation",
"and",
"exit",
"on",
"negative",
"reply",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L45-L63 | train | 205,286 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | environment | def environment(**kv):
"""
Context manager to run Python code with a modified UNIX process environment.
All key/value pairs in the keyword arguments are added (or changed, if the
key names an existing environmental variable) in the process environment
upon entrance into the context. Changes are undone upon exit: added
environmental variables are removed from the environment, and those whose
value was changed are reset to their pristine value.
"""
added = []
changed = {}
for key, value in kv.items():
if key not in os.environ:
added.append(key)
else:
changed[key] = os.environ[key]
os.environ[key] = value
yield
# restore pristine process environment
for key in added:
del os.environ[key]
for key in changed:
os.environ[key] = changed[key] | python | def environment(**kv):
"""
Context manager to run Python code with a modified UNIX process environment.
All key/value pairs in the keyword arguments are added (or changed, if the
key names an existing environmental variable) in the process environment
upon entrance into the context. Changes are undone upon exit: added
environmental variables are removed from the environment, and those whose
value was changed are reset to their pristine value.
"""
added = []
changed = {}
for key, value in kv.items():
if key not in os.environ:
added.append(key)
else:
changed[key] = os.environ[key]
os.environ[key] = value
yield
# restore pristine process environment
for key in added:
del os.environ[key]
for key in changed:
os.environ[key] = changed[key] | [
"def",
"environment",
"(",
"*",
"*",
"kv",
")",
":",
"added",
"=",
"[",
"]",
"changed",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"kv",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"os",
".",
"environ",
":",
"added",
".",
"ap... | Context manager to run Python code with a modified UNIX process environment.
All key/value pairs in the keyword arguments are added (or changed, if the
key names an existing environmental variable) in the process environment
upon entrance into the context. Changes are undone upon exit: added
environmental variables are removed from the environment, and those whose
value was changed are reset to their pristine value. | [
"Context",
"manager",
"to",
"run",
"Python",
"code",
"with",
"a",
"modified",
"UNIX",
"process",
"environment",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L67-L92 | train | 205,287 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | expand_ssh_proxy_command | def expand_ssh_proxy_command(command, user, addr, port=22):
"""
Expand spacial digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS".
"""
translated = []
subst = {
'h': list(str(addr)),
'p': list(str(port)),
'r': list(str(user)),
'%': ['%'],
}
escaped = False
for char in command:
if char == '%':
escaped = True
continue
if escaped:
try:
translated.extend(subst[char])
escaped = False
continue
except KeyError:
raise ValueError(
"Unknown digraph `%{0}`"
" in proxy command string `{1}`"
.format(char, command))
else:
translated.append(char)
continue
return ''.join(translated) | python | def expand_ssh_proxy_command(command, user, addr, port=22):
"""
Expand spacial digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS".
"""
translated = []
subst = {
'h': list(str(addr)),
'p': list(str(port)),
'r': list(str(user)),
'%': ['%'],
}
escaped = False
for char in command:
if char == '%':
escaped = True
continue
if escaped:
try:
translated.extend(subst[char])
escaped = False
continue
except KeyError:
raise ValueError(
"Unknown digraph `%{0}`"
" in proxy command string `{1}`"
.format(char, command))
else:
translated.append(char)
continue
return ''.join(translated) | [
"def",
"expand_ssh_proxy_command",
"(",
"command",
",",
"user",
",",
"addr",
",",
"port",
"=",
"22",
")",
":",
"translated",
"=",
"[",
"]",
"subst",
"=",
"{",
"'h'",
":",
"list",
"(",
"str",
"(",
"addr",
")",
")",
",",
"'p'",
":",
"list",
"(",
"s... | Expand spacial digraphs ``%h``, ``%p``, and ``%r``.
Return a copy of `command` with the following string
substitutions applied:
* ``%h`` is replaced by *addr*
* ``%p`` is replaced by *port*
* ``%r`` is replaced by *user*
* ``%%`` is replaced by ``%``.
See also: man page ``ssh_config``, section "TOKENS". | [
"Expand",
"spacial",
"digraphs",
"%h",
"%p",
"and",
"%r",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L95-L134 | train | 205,288 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | get_num_processors | def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use first one that succeeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") | python | def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use first one that succeeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") | [
"def",
"get_num_processors",
"(",
")",
":",
"# try different strategies and use first one that succeeeds",
"try",
":",
"return",
"os",
".",
"cpu_count",
"(",
")",
"# Py3 only",
"except",
"AttributeError",
":",
"pass",
"try",
":",
"import",
"multiprocessing",
"return",
... | Return number of online processor cores. | [
"Return",
"number",
"of",
"online",
"processor",
"cores",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L137-L176 | train | 205,289 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | sighandler | def sighandler(signum, handler):
"""
Context manager to run code with UNIX signal `signum` bound to `handler`.
The existing handler is saved upon entering the context and restored upon
exit.
The `handler` argument may be anything that can be passed to Python's
`signal.signal <https://docs.python.org/2/library/signal.html#signal.signal>`_
standard library call.
"""
prev_handler = signal.getsignal(signum)
signal.signal(signum, handler)
yield
signal.signal(signum, prev_handler) | python | def sighandler(signum, handler):
"""
Context manager to run code with UNIX signal `signum` bound to `handler`.
The existing handler is saved upon entering the context and restored upon
exit.
The `handler` argument may be anything that can be passed to Python's
`signal.signal <https://docs.python.org/2/library/signal.html#signal.signal>`_
standard library call.
"""
prev_handler = signal.getsignal(signum)
signal.signal(signum, handler)
yield
signal.signal(signum, prev_handler) | [
"def",
"sighandler",
"(",
"signum",
",",
"handler",
")",
":",
"prev_handler",
"=",
"signal",
".",
"getsignal",
"(",
"signum",
")",
"signal",
".",
"signal",
"(",
"signum",
",",
"handler",
")",
"yield",
"signal",
".",
"signal",
"(",
"signum",
",",
"prev_ha... | Context manager to run code with UNIX signal `signum` bound to `handler`.
The existing handler is saved upon entering the context and restored upon
exit.
The `handler` argument may be anything that can be passed to Python's
`signal.signal <https://docs.python.org/2/library/signal.html#signal.signal>`_
standard library call. | [
"Context",
"manager",
"to",
"run",
"code",
"with",
"UNIX",
"signal",
"signum",
"bound",
"to",
"handler",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L481-L495 | train | 205,290 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | temporary_dir | def temporary_dir(delete=True, dir=None,
prefix='elasticluster.', suffix='.d'):
"""
Make a temporary directory and make it current for the code in this context.
Delete temporary directory upon exit from the context, unless
``delete=False`` is passed in the arguments.
Arguments *suffix*, *prefix* and *dir* are exactly as in
:func:`tempfile.mkdtemp()` (but have different defaults).
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(suffix, prefix, dir)
os.chdir(tmpdir)
yield
os.chdir(cwd)
if delete:
shutil.rmtree(tmpdir, ignore_errors=True) | python | def temporary_dir(delete=True, dir=None,
prefix='elasticluster.', suffix='.d'):
"""
Make a temporary directory and make it current for the code in this context.
Delete temporary directory upon exit from the context, unless
``delete=False`` is passed in the arguments.
Arguments *suffix*, *prefix* and *dir* are exactly as in
:func:`tempfile.mkdtemp()` (but have different defaults).
"""
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp(suffix, prefix, dir)
os.chdir(tmpdir)
yield
os.chdir(cwd)
if delete:
shutil.rmtree(tmpdir, ignore_errors=True) | [
"def",
"temporary_dir",
"(",
"delete",
"=",
"True",
",",
"dir",
"=",
"None",
",",
"prefix",
"=",
"'elasticluster.'",
",",
"suffix",
"=",
"'.d'",
")",
":",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"tmpdir",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"suf... | Make a temporary directory and make it current for the code in this context.
Delete temporary directory upon exit from the context, unless
``delete=False`` is passed in the arguments.
Arguments *suffix*, *prefix* and *dir* are exactly as in
:func:`tempfile.mkdtemp()` (but have different defaults). | [
"Make",
"a",
"temporary",
"directory",
"and",
"make",
"it",
"current",
"for",
"the",
"code",
"in",
"this",
"context",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L499-L516 | train | 205,291 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | timeout | def timeout(delay, handler=None):
"""
Context manager to run code and deliver a SIGALRM signal after `delay` seconds.
Note that `delay` must be a whole number; otherwise it is converted to an
integer by Python's `int()` built-in function. For floating-point numbers,
that means rounding off to the nearest integer from below.
If the optional argument `handler` is supplied, it must be a callable that
is invoked if the alarm triggers while the code is still running. If no
`handler` is provided (default), then a `RuntimeError` with message
``Timeout`` is raised.
"""
delay = int(delay)
if handler is None:
def default_handler(signum, frame):
raise RuntimeError("{:d} seconds timeout expired".format(delay))
handler = default_handler
prev_sigalrm_handler = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, handler)
signal.alarm(delay)
yield
signal.alarm(0)
signal.signal(signal.SIGALRM, prev_sigalrm_handler) | python | def timeout(delay, handler=None):
"""
Context manager to run code and deliver a SIGALRM signal after `delay` seconds.
Note that `delay` must be a whole number; otherwise it is converted to an
integer by Python's `int()` built-in function. For floating-point numbers,
that means rounding off to the nearest integer from below.
If the optional argument `handler` is supplied, it must be a callable that
is invoked if the alarm triggers while the code is still running. If no
`handler` is provided (default), then a `RuntimeError` with message
``Timeout`` is raised.
"""
delay = int(delay)
if handler is None:
def default_handler(signum, frame):
raise RuntimeError("{:d} seconds timeout expired".format(delay))
handler = default_handler
prev_sigalrm_handler = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, handler)
signal.alarm(delay)
yield
signal.alarm(0)
signal.signal(signal.SIGALRM, prev_sigalrm_handler) | [
"def",
"timeout",
"(",
"delay",
",",
"handler",
"=",
"None",
")",
":",
"delay",
"=",
"int",
"(",
"delay",
")",
"if",
"handler",
"is",
"None",
":",
"def",
"default_handler",
"(",
"signum",
",",
"frame",
")",
":",
"raise",
"RuntimeError",
"(",
"\"{:d} se... | Context manager to run code and deliver a SIGALRM signal after `delay` seconds.
Note that `delay` must be a whole number; otherwise it is converted to an
integer by Python's `int()` built-in function. For floating-point numbers,
that means rounding off to the nearest integer from below.
If the optional argument `handler` is supplied, it must be a callable that
is invoked if the alarm triggers while the code is still running. If no
`handler` is provided (default), then a `RuntimeError` with message
``Timeout`` is raised. | [
"Context",
"manager",
"to",
"run",
"code",
"and",
"deliver",
"a",
"SIGALRM",
"signal",
"after",
"delay",
"seconds",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L520-L543 | train | 205,292 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | format_warning_oneline | def format_warning_oneline(message, category, filename, lineno,
file=None, line=None):
"""
Format a warning for logging.
The returned value should be a single-line string, for better
logging style (although this is not enforced by the code).
This methods' arguments have the same meaning of the
like-named arguments from `warnings.formatwarning`.
"""
# `warnings.formatwarning` produces multi-line output that does
# not look good in a log file, so let us replace it with something
# simpler...
return ('{category}: {message}'
.format(message=message, category=category.__name__)) | python | def format_warning_oneline(message, category, filename, lineno,
file=None, line=None):
"""
Format a warning for logging.
The returned value should be a single-line string, for better
logging style (although this is not enforced by the code).
This methods' arguments have the same meaning of the
like-named arguments from `warnings.formatwarning`.
"""
# `warnings.formatwarning` produces multi-line output that does
# not look good in a log file, so let us replace it with something
# simpler...
return ('{category}: {message}'
.format(message=message, category=category.__name__)) | [
"def",
"format_warning_oneline",
"(",
"message",
",",
"category",
",",
"filename",
",",
"lineno",
",",
"file",
"=",
"None",
",",
"line",
"=",
"None",
")",
":",
"# `warnings.formatwarning` produces multi-line output that does",
"# not look good in a log file, so let us repla... | Format a warning for logging.
The returned value should be a single-line string, for better
logging style (although this is not enforced by the code).
This methods' arguments have the same meaning of the
like-named arguments from `warnings.formatwarning`. | [
"Format",
"a",
"warning",
"for",
"logging",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L619-L634 | train | 205,293 |
gc3-uzh-ch/elasticluster | elasticluster/utils.py | redirect_warnings | def redirect_warnings(capture=True, logger='py.warnings'):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
assert _warnings_showwarning is None
_warnings_showwarning = warnings.showwarning
# `warnings.showwarning` must be a function, a generic
# callable object is not accepted ...
warnings.showwarning = _WarningsLogger(logger, format_warning_oneline).__call__
else:
assert _warnings_showwarning is not None
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None | python | def redirect_warnings(capture=True, logger='py.warnings'):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
assert _warnings_showwarning is None
_warnings_showwarning = warnings.showwarning
# `warnings.showwarning` must be a function, a generic
# callable object is not accepted ...
warnings.showwarning = _WarningsLogger(logger, format_warning_oneline).__call__
else:
assert _warnings_showwarning is not None
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None | [
"def",
"redirect_warnings",
"(",
"capture",
"=",
"True",
",",
"logger",
"=",
"'py.warnings'",
")",
":",
"global",
"_warnings_showwarning",
"if",
"capture",
":",
"assert",
"_warnings_showwarning",
"is",
"None",
"_warnings_showwarning",
"=",
"warnings",
".",
"showwarn... | If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations. | [
"If",
"capture",
"is",
"true",
"redirect",
"all",
"warnings",
"to",
"the",
"logging",
"package",
".",
"If",
"capture",
"is",
"False",
"ensure",
"that",
"warnings",
"are",
"not",
"redirected",
"to",
"logging",
"but",
"to",
"their",
"original",
"destinations",
... | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L637-L653 | train | 205,294 |
gc3-uzh-ch/elasticluster | elasticluster/providers/__init__.py | AbstractCloudProvider.start_instance | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username=None, node_name=None):
"""Starts a new instance on the cloud using the given properties.
Multiple instances might be started in different threads at the same
time. The implementation should handle any problems regarding this
itself.
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_name: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance
"""
pass | python | def start_instance(self, key_name, public_key_path, private_key_path,
security_group, flavor, image_id, image_userdata,
username=None, node_name=None):
"""Starts a new instance on the cloud using the given properties.
Multiple instances might be started in different threads at the same
time. The implementation should handle any problems regarding this
itself.
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_name: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance
"""
pass | [
"def",
"start_instance",
"(",
"self",
",",
"key_name",
",",
"public_key_path",
",",
"private_key_path",
",",
"security_group",
",",
"flavor",
",",
"image_id",
",",
"image_userdata",
",",
"username",
"=",
"None",
",",
"node_name",
"=",
"None",
")",
":",
"pass"
... | Starts a new instance on the cloud using the given properties.
Multiple instances might be started in different threads at the same
time. The implementation should handle any problems regarding this
itself.
:param str key_name: name of the ssh key to connect
:param str public_key_path: path to ssh public key
:param str private_key_path: path to ssh private key
:param str security_group: firewall rule definition to apply on the
instance
:param str flavor: machine type to use for the instance
:param str image_name: image type (os) to use for the instance
:param str image_userdata: command to execute after startup
:param str username: username for the given ssh key, default None
:return: str - instance id of the started instance | [
"Starts",
"a",
"new",
"instance",
"on",
"the",
"cloud",
"using",
"the",
"given",
"properties",
".",
"Multiple",
"instances",
"might",
"be",
"started",
"in",
"different",
"threads",
"at",
"the",
"same",
"time",
".",
"The",
"implementation",
"should",
"handle",
... | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/__init__.py#L52-L72 | train | 205,295 |
gc3-uzh-ch/elasticluster | elasticluster/providers/libcloud_provider.py | LibCloudProvider.__get_name_or_id | def __get_name_or_id(values, known):
"""
Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`.
:param str values: comma-separated list (i.e., a Python string) of items
:param list known: list of libcloud items to filter
:return: list of the libcloud items that match the given values
"""
result = list()
for element in [e.strip() for e in values.split(',')]:
for item in [i for i in known if i.name == element or i.id == element]:
result.append(item)
return result | python | def __get_name_or_id(values, known):
"""
Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`.
:param str values: comma-separated list (i.e., a Python string) of items
:param list known: list of libcloud items to filter
:return: list of the libcloud items that match the given values
"""
result = list()
for element in [e.strip() for e in values.split(',')]:
for item in [i for i in known if i.name == element or i.id == element]:
result.append(item)
return result | [
"def",
"__get_name_or_id",
"(",
"values",
",",
"known",
")",
":",
"result",
"=",
"list",
"(",
")",
"for",
"element",
"in",
"[",
"e",
".",
"strip",
"(",
")",
"for",
"e",
"in",
"values",
".",
"split",
"(",
"','",
")",
"]",
":",
"for",
"item",
"in",... | Return list of values that match attribute ``.id`` or ``.name`` of any object in list `known`.
:param str values: comma-separated list (i.e., a Python string) of items
:param list known: list of libcloud items to filter
:return: list of the libcloud items that match the given values | [
"Return",
"list",
"of",
"values",
"that",
"match",
"attribute",
".",
"id",
"or",
".",
"name",
"of",
"any",
"object",
"in",
"list",
"known",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/providers/libcloud_provider.py#L261-L273 | train | 205,296 |
gc3-uzh-ch/elasticluster | elasticluster/share/playbooks/library/bootparam.py | _assemble_linux_cmdline | def _assemble_linux_cmdline(kv):
"""
Given a dictionary, assemble a Linux boot command line.
"""
# try to be compatible with Py2.4
parts = []
for k, v in kv.items():
if v is None:
parts.append(str(k))
else:
parts.append('%s=%s' % (k, v))
return ' '.join(parts) | python | def _assemble_linux_cmdline(kv):
"""
Given a dictionary, assemble a Linux boot command line.
"""
# try to be compatible with Py2.4
parts = []
for k, v in kv.items():
if v is None:
parts.append(str(k))
else:
parts.append('%s=%s' % (k, v))
return ' '.join(parts) | [
"def",
"_assemble_linux_cmdline",
"(",
"kv",
")",
":",
"# try to be compatible with Py2.4",
"parts",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"kv",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"None",
":",
"parts",
".",
"append",
"(",
"str",
"(",
... | Given a dictionary, assemble a Linux boot command line. | [
"Given",
"a",
"dictionary",
"assemble",
"a",
"Linux",
"boot",
"command",
"line",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/share/playbooks/library/bootparam.py#L181-L192 | train | 205,297 |
gc3-uzh-ch/elasticluster | elasticluster/share/playbooks/library/bootparam.py | _edit_linux_cmdline | def _edit_linux_cmdline(cmdline, state, name, value=None):
"""
Return a new Linux command line, with parameter `name` added,
replaced, or removed.
"""
kv = _parse_linux_cmdline(cmdline)
if state == 'absent':
try:
del kv[name]
except KeyError:
pass
elif state == 'present':
kv[name] = value
return _assemble_linux_cmdline(kv) | python | def _edit_linux_cmdline(cmdline, state, name, value=None):
"""
Return a new Linux command line, with parameter `name` added,
replaced, or removed.
"""
kv = _parse_linux_cmdline(cmdline)
if state == 'absent':
try:
del kv[name]
except KeyError:
pass
elif state == 'present':
kv[name] = value
return _assemble_linux_cmdline(kv) | [
"def",
"_edit_linux_cmdline",
"(",
"cmdline",
",",
"state",
",",
"name",
",",
"value",
"=",
"None",
")",
":",
"kv",
"=",
"_parse_linux_cmdline",
"(",
"cmdline",
")",
"if",
"state",
"==",
"'absent'",
":",
"try",
":",
"del",
"kv",
"[",
"name",
"]",
"exce... | Return a new Linux command line, with parameter `name` added,
replaced, or removed. | [
"Return",
"a",
"new",
"Linux",
"command",
"line",
"with",
"parameter",
"name",
"added",
"replaced",
"or",
"removed",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/share/playbooks/library/bootparam.py#L195-L208 | train | 205,298 |
gc3-uzh-ch/elasticluster | elasticluster/subcommands.py | Start.execute | def execute(self):
"""
Starts a new cluster.
"""
cluster_template = self.params.cluster
if self.params.cluster_name:
cluster_name = self.params.cluster_name
else:
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
storage_path=self.params.storage)
if cluster_template not in creator.cluster_conf:
raise ClusterNotFound(
"No cluster template named `{0}`"
.format(cluster_template))
# possibly overwrite node mix from config
cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes']
for kind, num in self.params.nodes_override.items():
if kind not in cluster_nodes_conf:
raise ConfigurationError(
"No node group `{kind}` defined"
" in cluster template `{template}`"
.format(kind=kind, template=cluster_template))
cluster_nodes_conf[kind]['num'] = num
# First, check if the cluster is already created.
try:
cluster = creator.load_cluster(cluster_name)
except ClusterNotFound:
try:
cluster = creator.create_cluster(
cluster_template, cluster_name)
except ConfigurationError as err:
log.error("Starting cluster %s: %s", cluster_template, err)
return
try:
print("Starting cluster `{0}` with:".format(cluster.name))
for cls in cluster.nodes:
print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls))
print("(This may take a while...)")
min_nodes = dict((kind, cluster_nodes_conf[kind]['min_num'])
for kind in cluster_nodes_conf)
cluster.start(min_nodes, self.params.max_concurrent_requests)
if self.params.no_setup:
print("NOT configuring the cluster as requested.")
else:
print("Configuring the cluster ...")
print("(this too may take a while)")
ok = cluster.setup()
if ok:
print(
"\nYour cluster `{0}` is ready!"
.format(cluster.name))
else:
print(
"\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!"
.format(cluster.name))
print(cluster_summary(cluster))
except (KeyError, ImageError, SecurityGroupError, ClusterError) as err:
log.error("Could not start cluster `%s`: %s", cluster.name, err)
raise | python | def execute(self):
"""
Starts a new cluster.
"""
cluster_template = self.params.cluster
if self.params.cluster_name:
cluster_name = self.params.cluster_name
else:
cluster_name = self.params.cluster
creator = make_creator(self.params.config,
storage_path=self.params.storage)
if cluster_template not in creator.cluster_conf:
raise ClusterNotFound(
"No cluster template named `{0}`"
.format(cluster_template))
# possibly overwrite node mix from config
cluster_nodes_conf = creator.cluster_conf[cluster_template]['nodes']
for kind, num in self.params.nodes_override.items():
if kind not in cluster_nodes_conf:
raise ConfigurationError(
"No node group `{kind}` defined"
" in cluster template `{template}`"
.format(kind=kind, template=cluster_template))
cluster_nodes_conf[kind]['num'] = num
# First, check if the cluster is already created.
try:
cluster = creator.load_cluster(cluster_name)
except ClusterNotFound:
try:
cluster = creator.create_cluster(
cluster_template, cluster_name)
except ConfigurationError as err:
log.error("Starting cluster %s: %s", cluster_template, err)
return
try:
print("Starting cluster `{0}` with:".format(cluster.name))
for cls in cluster.nodes:
print("* {0:d} {1} nodes.".format(len(cluster.nodes[cls]), cls))
print("(This may take a while...)")
min_nodes = dict((kind, cluster_nodes_conf[kind]['min_num'])
for kind in cluster_nodes_conf)
cluster.start(min_nodes, self.params.max_concurrent_requests)
if self.params.no_setup:
print("NOT configuring the cluster as requested.")
else:
print("Configuring the cluster ...")
print("(this too may take a while)")
ok = cluster.setup()
if ok:
print(
"\nYour cluster `{0}` is ready!"
.format(cluster.name))
else:
print(
"\nWARNING: YOUR CLUSTER `{0}` IS NOT READY YET!"
.format(cluster.name))
print(cluster_summary(cluster))
except (KeyError, ImageError, SecurityGroupError, ClusterError) as err:
log.error("Could not start cluster `%s`: %s", cluster.name, err)
raise | [
"def",
"execute",
"(",
"self",
")",
":",
"cluster_template",
"=",
"self",
".",
"params",
".",
"cluster",
"if",
"self",
".",
"params",
".",
"cluster_name",
":",
"cluster_name",
"=",
"self",
".",
"params",
".",
"cluster_name",
"else",
":",
"cluster_name",
"=... | Starts a new cluster. | [
"Starts",
"a",
"new",
"cluster",
"."
] | e6345633308c76de13b889417df572815aabe744 | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/subcommands.py#L172-L237 | train | 205,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.