repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer._fast_write | def _fast_write(self, outfile, value):
"""Function for fast writing to motor files."""
outfile.truncate(0)
outfile.write(str(int(value)))
outfile.flush() | python | def _fast_write(self, outfile, value):
"""Function for fast writing to motor files."""
outfile.truncate(0)
outfile.write(str(int(value)))
outfile.flush() | [
"def",
"_fast_write",
"(",
"self",
",",
"outfile",
",",
"value",
")",
":",
"outfile",
".",
"truncate",
"(",
"0",
")",
"outfile",
".",
"write",
"(",
"str",
"(",
"int",
"(",
"value",
")",
")",
")",
"outfile",
".",
"flush",
"(",
")"
] | Function for fast writing to motor files. | [
"Function",
"for",
"fast",
"writing",
"to",
"motor",
"files",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L179-L183 | train | 228,200 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer._set_duty | def _set_duty(self, motor_duty_file, duty, friction_offset,
voltage_comp):
"""Function to set the duty cycle of the motors."""
# Compensate for nominal voltage and round the input
duty_int = int(round(duty*voltage_comp))
# Add or subtract offset and clamp the value between -100 and 100
if duty_int > 0:
duty_int = min(100, duty_int + friction_offset)
elif duty_int < 0:
duty_int = max(-100, duty_int - friction_offset)
# Apply the signal to the motor
self._fast_write(motor_duty_file, duty_int) | python | def _set_duty(self, motor_duty_file, duty, friction_offset,
voltage_comp):
"""Function to set the duty cycle of the motors."""
# Compensate for nominal voltage and round the input
duty_int = int(round(duty*voltage_comp))
# Add or subtract offset and clamp the value between -100 and 100
if duty_int > 0:
duty_int = min(100, duty_int + friction_offset)
elif duty_int < 0:
duty_int = max(-100, duty_int - friction_offset)
# Apply the signal to the motor
self._fast_write(motor_duty_file, duty_int) | [
"def",
"_set_duty",
"(",
"self",
",",
"motor_duty_file",
",",
"duty",
",",
"friction_offset",
",",
"voltage_comp",
")",
":",
"# Compensate for nominal voltage and round the input",
"duty_int",
"=",
"int",
"(",
"round",
"(",
"duty",
"*",
"voltage_comp",
")",
")",
"... | Function to set the duty cycle of the motors. | [
"Function",
"to",
"set",
"the",
"duty",
"cycle",
"of",
"the",
"motors",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L185-L198 | train | 228,201 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer.balance | def balance(self):
"""Run the _balance method as a thread."""
balance_thread = threading.Thread(target=self._balance)
balance_thread.start() | python | def balance(self):
"""Run the _balance method as a thread."""
balance_thread = threading.Thread(target=self._balance)
balance_thread.start() | [
"def",
"balance",
"(",
"self",
")",
":",
"balance_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"_balance",
")",
"balance_thread",
".",
"start",
"(",
")"
] | Run the _balance method as a thread. | [
"Run",
"the",
"_balance",
"method",
"as",
"a",
"thread",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L212-L215 | train | 228,202 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer._move | def _move(self, speed=0, steering=0, seconds=None):
"""Move robot."""
self.drive_queue.put((speed, steering))
if seconds is not None:
time.sleep(seconds)
self.drive_queue.put((0, 0))
self.drive_queue.join() | python | def _move(self, speed=0, steering=0, seconds=None):
"""Move robot."""
self.drive_queue.put((speed, steering))
if seconds is not None:
time.sleep(seconds)
self.drive_queue.put((0, 0))
self.drive_queue.join() | [
"def",
"_move",
"(",
"self",
",",
"speed",
"=",
"0",
",",
"steering",
"=",
"0",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"drive_queue",
".",
"put",
"(",
"(",
"speed",
",",
"steering",
")",
")",
"if",
"seconds",
"is",
"not",
"None",
":"... | Move robot. | [
"Move",
"robot",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L475-L481 | train | 228,203 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer.move_forward | def move_forward(self, seconds=None):
"""Move robot forward."""
self._move(speed=SPEED_MAX, steering=0, seconds=seconds) | python | def move_forward(self, seconds=None):
"""Move robot forward."""
self._move(speed=SPEED_MAX, steering=0, seconds=seconds) | [
"def",
"move_forward",
"(",
"self",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"_move",
"(",
"speed",
"=",
"SPEED_MAX",
",",
"steering",
"=",
"0",
",",
"seconds",
"=",
"seconds",
")"
] | Move robot forward. | [
"Move",
"robot",
"forward",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L483-L485 | train | 228,204 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer.move_backward | def move_backward(self, seconds=None):
"""Move robot backward."""
self._move(speed=-SPEED_MAX, steering=0, seconds=seconds) | python | def move_backward(self, seconds=None):
"""Move robot backward."""
self._move(speed=-SPEED_MAX, steering=0, seconds=seconds) | [
"def",
"move_backward",
"(",
"self",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"_move",
"(",
"speed",
"=",
"-",
"SPEED_MAX",
",",
"steering",
"=",
"0",
",",
"seconds",
"=",
"seconds",
")"
] | Move robot backward. | [
"Move",
"robot",
"backward",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L487-L489 | train | 228,205 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer.rotate_left | def rotate_left(self, seconds=None):
"""Rotate robot left."""
self._move(speed=0, steering=STEER_MAX, seconds=seconds) | python | def rotate_left(self, seconds=None):
"""Rotate robot left."""
self._move(speed=0, steering=STEER_MAX, seconds=seconds) | [
"def",
"rotate_left",
"(",
"self",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"_move",
"(",
"speed",
"=",
"0",
",",
"steering",
"=",
"STEER_MAX",
",",
"seconds",
"=",
"seconds",
")"
] | Rotate robot left. | [
"Rotate",
"robot",
"left",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L491-L493 | train | 228,206 |
ev3dev/ev3dev-lang-python | ev3dev2/control/GyroBalancer.py | GyroBalancer.rotate_right | def rotate_right(self, seconds=None):
"""Rotate robot right."""
self._move(speed=0, steering=-STEER_MAX, seconds=seconds) | python | def rotate_right(self, seconds=None):
"""Rotate robot right."""
self._move(speed=0, steering=-STEER_MAX, seconds=seconds) | [
"def",
"rotate_right",
"(",
"self",
",",
"seconds",
"=",
"None",
")",
":",
"self",
".",
"_move",
"(",
"speed",
"=",
"0",
",",
"steering",
"=",
"-",
"STEER_MAX",
",",
"seconds",
"=",
"seconds",
")"
] | Rotate robot right. | [
"Rotate",
"robot",
"right",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/control/GyroBalancer.py#L495-L497 | train | 228,207 |
ev3dev/ev3dev-lang-python | ev3dev2/button.py | ButtonBase.evdev_device | def evdev_device(self):
"""
Return our corresponding evdev device object
"""
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
if device.name == self.evdev_device_name:
return device
raise Exception("%s: could not find evdev device '%s'" % (self, self.evdev_device_name)) | python | def evdev_device(self):
"""
Return our corresponding evdev device object
"""
devices = [evdev.InputDevice(fn) for fn in evdev.list_devices()]
for device in devices:
if device.name == self.evdev_device_name:
return device
raise Exception("%s: could not find evdev device '%s'" % (self, self.evdev_device_name)) | [
"def",
"evdev_device",
"(",
"self",
")",
":",
"devices",
"=",
"[",
"evdev",
".",
"InputDevice",
"(",
"fn",
")",
"for",
"fn",
"in",
"evdev",
".",
"list_devices",
"(",
")",
"]",
"for",
"device",
"in",
"devices",
":",
"if",
"device",
".",
"name",
"==",
... | Return our corresponding evdev device object | [
"Return",
"our",
"corresponding",
"evdev",
"device",
"object"
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/button.py#L115-L125 | train | 228,208 |
ev3dev/ev3dev-lang-python | ev3dev2/button.py | ButtonBase.wait_for_bump | def wait_for_bump(self, buttons, timeout_ms=None):
"""
Wait for the button to be pressed down and then released.
Both actions must happen within timeout_ms.
"""
start_time = time.time()
if self.wait_for_pressed(buttons, timeout_ms):
if timeout_ms is not None:
timeout_ms -= int((time.time() - start_time) * 1000)
return self.wait_for_released(buttons, timeout_ms)
return False | python | def wait_for_bump(self, buttons, timeout_ms=None):
"""
Wait for the button to be pressed down and then released.
Both actions must happen within timeout_ms.
"""
start_time = time.time()
if self.wait_for_pressed(buttons, timeout_ms):
if timeout_ms is not None:
timeout_ms -= int((time.time() - start_time) * 1000)
return self.wait_for_released(buttons, timeout_ms)
return False | [
"def",
"wait_for_bump",
"(",
"self",
",",
"buttons",
",",
"timeout_ms",
"=",
"None",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"if",
"self",
".",
"wait_for_pressed",
"(",
"buttons",
",",
"timeout_ms",
")",
":",
"if",
"timeout_ms",
"is",... | Wait for the button to be pressed down and then released.
Both actions must happen within timeout_ms. | [
"Wait",
"for",
"the",
"button",
"to",
"be",
"pressed",
"down",
"and",
"then",
"released",
".",
"Both",
"actions",
"must",
"happen",
"within",
"timeout_ms",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/button.py#L202-L214 | train | 228,209 |
ev3dev/ev3dev-lang-python | ev3dev2/button.py | ButtonEVIO.buttons_pressed | def buttons_pressed(self):
"""
Returns list of names of pressed buttons.
"""
for b in self._buffer_cache:
fcntl.ioctl(self._button_file(b), self.EVIOCGKEY, self._buffer_cache[b])
pressed = []
for k, v in self._buttons.items():
buf = self._buffer_cache[v['name']]
bit = v['value']
if bool(buf[int(bit / 8)] & 1 << bit % 8):
pressed.append(k)
return pressed | python | def buttons_pressed(self):
"""
Returns list of names of pressed buttons.
"""
for b in self._buffer_cache:
fcntl.ioctl(self._button_file(b), self.EVIOCGKEY, self._buffer_cache[b])
pressed = []
for k, v in self._buttons.items():
buf = self._buffer_cache[v['name']]
bit = v['value']
if bool(buf[int(bit / 8)] & 1 << bit % 8):
pressed.append(k)
return pressed | [
"def",
"buttons_pressed",
"(",
"self",
")",
":",
"for",
"b",
"in",
"self",
".",
"_buffer_cache",
":",
"fcntl",
".",
"ioctl",
"(",
"self",
".",
"_button_file",
"(",
"b",
")",
",",
"self",
".",
"EVIOCGKEY",
",",
"self",
".",
"_buffer_cache",
"[",
"b",
... | Returns list of names of pressed buttons. | [
"Returns",
"list",
"of",
"names",
"of",
"pressed",
"buttons",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/button.py#L255-L270 | train | 228,210 |
aws/sagemaker-containers | src/sagemaker_containers/_mpi.py | _orted_process | def _orted_process():
"""Waits maximum of 5 minutes for orted process to start"""
for i in range(5 * 60):
procs = [p for p in psutil.process_iter(attrs=['name']) if p.info['name'] == 'orted']
if procs:
return procs
time.sleep(1) | python | def _orted_process():
"""Waits maximum of 5 minutes for orted process to start"""
for i in range(5 * 60):
procs = [p for p in psutil.process_iter(attrs=['name']) if p.info['name'] == 'orted']
if procs:
return procs
time.sleep(1) | [
"def",
"_orted_process",
"(",
")",
":",
"for",
"i",
"in",
"range",
"(",
"5",
"*",
"60",
")",
":",
"procs",
"=",
"[",
"p",
"for",
"p",
"in",
"psutil",
".",
"process_iter",
"(",
"attrs",
"=",
"[",
"'name'",
"]",
")",
"if",
"p",
".",
"info",
"[",
... | Waits maximum of 5 minutes for orted process to start | [
"Waits",
"maximum",
"of",
"5",
"minutes",
"for",
"orted",
"process",
"to",
"start"
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_mpi.py#L75-L82 | train | 228,211 |
aws/sagemaker-containers | src/sagemaker_containers/_mpi.py | _parse_custom_mpi_options | def _parse_custom_mpi_options(custom_mpi_options):
# type: (str) -> Tuple[argparse.Namespace, List[str]]
"""Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately."""
parser = argparse.ArgumentParser()
parser.add_argument('--NCCL_DEBUG', default="INFO", type=str)
return parser.parse_known_args(custom_mpi_options.split()) | python | def _parse_custom_mpi_options(custom_mpi_options):
# type: (str) -> Tuple[argparse.Namespace, List[str]]
"""Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately."""
parser = argparse.ArgumentParser()
parser.add_argument('--NCCL_DEBUG', default="INFO", type=str)
return parser.parse_known_args(custom_mpi_options.split()) | [
"def",
"_parse_custom_mpi_options",
"(",
"custom_mpi_options",
")",
":",
"# type: (str) -> Tuple[argparse.Namespace, List[str]]",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--NCCL_DEBUG'",
",",
"default",
"=",
"\"INFO... | Parse custom MPI options provided by user. Known options default value will be overridden
and unknown options would be identified separately. | [
"Parse",
"custom",
"MPI",
"options",
"provided",
"by",
"user",
".",
"Known",
"options",
"default",
"value",
"will",
"be",
"overridden",
"and",
"unknown",
"options",
"would",
"be",
"identified",
"separately",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_mpi.py#L230-L238 | train | 228,212 |
aws/sagemaker-containers | src/sagemaker_containers/_modules.py | download_and_install | def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):
# type: (str, str, bool) -> None
"""Download, prepare and install a compressed tar file from S3 or local directory as a module.
The SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.
This function downloads this compressed file and, if provided, transforms it
into a module before installing it.
This method is the predecessor of :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`
and has been kept for backward-compatibility purposes.
Args:
name (str): name of the script or module.
uri (str): the location of the module.
cache (bool): defaults to True. It will not download and install the module again if it is already installed.
"""
should_use_cache = cache and exists(name)
if not should_use_cache:
with _files.tmpdir() as tmpdir:
if uri.startswith('s3://'):
dst = os.path.join(tmpdir, 'tar_file')
_files.s3_download(uri, dst)
module_path = os.path.join(tmpdir, 'module_dir')
os.makedirs(module_path)
with tarfile.open(name=dst, mode='r:gz') as t:
t.extractall(path=module_path)
else:
module_path = uri
prepare(module_path, name)
install(module_path) | python | def download_and_install(uri, name=DEFAULT_MODULE_NAME, cache=True):
# type: (str, str, bool) -> None
"""Download, prepare and install a compressed tar file from S3 or local directory as a module.
The SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.
This function downloads this compressed file and, if provided, transforms it
into a module before installing it.
This method is the predecessor of :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`
and has been kept for backward-compatibility purposes.
Args:
name (str): name of the script or module.
uri (str): the location of the module.
cache (bool): defaults to True. It will not download and install the module again if it is already installed.
"""
should_use_cache = cache and exists(name)
if not should_use_cache:
with _files.tmpdir() as tmpdir:
if uri.startswith('s3://'):
dst = os.path.join(tmpdir, 'tar_file')
_files.s3_download(uri, dst)
module_path = os.path.join(tmpdir, 'module_dir')
os.makedirs(module_path)
with tarfile.open(name=dst, mode='r:gz') as t:
t.extractall(path=module_path)
else:
module_path = uri
prepare(module_path, name)
install(module_path) | [
"def",
"download_and_install",
"(",
"uri",
",",
"name",
"=",
"DEFAULT_MODULE_NAME",
",",
"cache",
"=",
"True",
")",
":",
"# type: (str, str, bool) -> None",
"should_use_cache",
"=",
"cache",
"and",
"exists",
"(",
"name",
")",
"if",
"not",
"should_use_cache",
":",
... | Download, prepare and install a compressed tar file from S3 or local directory as a module.
The SageMaker Python SDK saves the user provided scripts as compressed tar files in S3.
This function downloads this compressed file and, if provided, transforms it
into a module before installing it.
This method is the predecessor of :meth:`~sagemaker_containers.beta.framework.files.download_and_extract`
and has been kept for backward-compatibility purposes.
Args:
name (str): name of the script or module.
uri (str): the location of the module.
cache (bool): defaults to True. It will not download and install the module again if it is already installed. | [
"Download",
"prepare",
"and",
"install",
"a",
"compressed",
"tar",
"file",
"from",
"S3",
"or",
"local",
"directory",
"as",
"a",
"module",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_modules.py#L126-L159 | train | 228,213 |
aws/sagemaker-containers | src/sagemaker_containers/_modules.py | run | def run(module_name, args=None, env_vars=None, wait=True, capture_error=False):
# type: (str, list, dict, bool, bool) -> subprocess.Popen
"""Run Python module as a script.
Search sys.path for the named module and execute its contents as the __main__ module.
Since the argument is a module name, you must not give a file extension (.py). The module name should be a valid
absolute Python module name, but the implementation may not always enforce this (e.g. it may allow you to use a name
that includes a hyphen).
Package names (including namespace packages) are also permitted. When a package name is supplied instead of a
normal module, the interpreter will execute <pkg>.__main__ as the main module. This behaviour is deliberately
similar to the handling of directories and zipfiles that are passed to the interpreter as the script argument.
Note This option cannot be used with built-in modules and extension modules written in C, since they do not have
Python module files. However, it can still be used for precompiled modules, even if the original source file is
not available. If this option is given, the first element of sys.argv will be the full path to the module file (
while the module file is being located, the first element will be set to "-m"). As with the -c option,
the current directory will be added to the start of sys.path.
You can find more information at https://docs.python.org/3/using/cmdline.html#cmdoption-m
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import mapping, modules
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>modules.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
module_name (str): module name in the same format required by python -m <module-name> cli command.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
"""
args = args or []
env_vars = env_vars or {}
cmd = [_process.python_executable(), '-m', module_name] + args
_logging.log_script_invocation(cmd, env_vars)
if wait:
return _process.check_error(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
else:
return _process.create(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error) | python | def run(module_name, args=None, env_vars=None, wait=True, capture_error=False):
# type: (str, list, dict, bool, bool) -> subprocess.Popen
"""Run Python module as a script.
Search sys.path for the named module and execute its contents as the __main__ module.
Since the argument is a module name, you must not give a file extension (.py). The module name should be a valid
absolute Python module name, but the implementation may not always enforce this (e.g. it may allow you to use a name
that includes a hyphen).
Package names (including namespace packages) are also permitted. When a package name is supplied instead of a
normal module, the interpreter will execute <pkg>.__main__ as the main module. This behaviour is deliberately
similar to the handling of directories and zipfiles that are passed to the interpreter as the script argument.
Note This option cannot be used with built-in modules and extension modules written in C, since they do not have
Python module files. However, it can still be used for precompiled modules, even if the original source file is
not available. If this option is given, the first element of sys.argv will be the full path to the module file (
while the module file is being located, the first element will be set to "-m"). As with the -c option,
the current directory will be added to the start of sys.path.
You can find more information at https://docs.python.org/3/using/cmdline.html#cmdoption-m
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import mapping, modules
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>modules.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
module_name (str): module name in the same format required by python -m <module-name> cli command.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
"""
args = args or []
env_vars = env_vars or {}
cmd = [_process.python_executable(), '-m', module_name] + args
_logging.log_script_invocation(cmd, env_vars)
if wait:
return _process.check_error(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error)
else:
return _process.create(cmd, _errors.ExecuteUserScriptError, capture_error=capture_error) | [
"def",
"run",
"(",
"module_name",
",",
"args",
"=",
"None",
",",
"env_vars",
"=",
"None",
",",
"wait",
"=",
"True",
",",
"capture_error",
"=",
"False",
")",
":",
"# type: (str, list, dict, bool, bool) -> subprocess.Popen",
"args",
"=",
"args",
"or",
"[",
"]",
... | Run Python module as a script.
Search sys.path for the named module and execute its contents as the __main__ module.
Since the argument is a module name, you must not give a file extension (.py). The module name should be a valid
absolute Python module name, but the implementation may not always enforce this (e.g. it may allow you to use a name
that includes a hyphen).
Package names (including namespace packages) are also permitted. When a package name is supplied instead of a
normal module, the interpreter will execute <pkg>.__main__ as the main module. This behaviour is deliberately
similar to the handling of directories and zipfiles that are passed to the interpreter as the script argument.
Note This option cannot be used with built-in modules and extension modules written in C, since they do not have
Python module files. However, it can still be used for precompiled modules, even if the original source file is
not available. If this option is given, the first element of sys.argv will be the full path to the module file (
while the module file is being located, the first element will be set to "-m"). As with the -c option,
the current directory will be added to the start of sys.path.
You can find more information at https://docs.python.org/3/using/cmdline.html#cmdoption-m
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import mapping, modules
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>modules.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
module_name (str): module name in the same format required by python -m <module-name> cli command.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors. | [
"Run",
"Python",
"module",
"as",
"a",
"script",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_modules.py#L162-L225 | train | 228,214 |
aws/sagemaker-containers | src/sagemaker_containers/_modules.py | run_module | def run_module(uri, args, env_vars=None, name=DEFAULT_MODULE_NAME, cache=None, wait=True, capture_error=False):
# type: (str, list, dict, str, bool, bool, bool) -> subprocess.Popen
"""Download, prepare and executes a compressed tar file from S3 or provided directory as a module.
SageMaker Python SDK saves the user provided scripts as compressed tar files in S3
https://github.com/aws/sagemaker-python-sdk.
This function downloads this compressed file, transforms it as a module, and executes it.
Args:
uri (str): the location of the module.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
name (str): name of the script or module.
cache (bool): If True it will avoid downloading the module again, if already installed.
wait (bool): If True run_module will wait for the user module to exit and check the exit code,
otherwise it will launch the user module with subprocess and return the process object.
"""
_warning_cache_deprecation(cache)
env_vars = env_vars or {}
env_vars = env_vars.copy()
_files.download_and_extract(uri, name, _env.code_dir)
prepare(_env.code_dir, name)
install(_env.code_dir)
_env.write_env_vars(env_vars)
return run(name, args, env_vars, wait, capture_error) | python | def run_module(uri, args, env_vars=None, name=DEFAULT_MODULE_NAME, cache=None, wait=True, capture_error=False):
# type: (str, list, dict, str, bool, bool, bool) -> subprocess.Popen
"""Download, prepare and executes a compressed tar file from S3 or provided directory as a module.
SageMaker Python SDK saves the user provided scripts as compressed tar files in S3
https://github.com/aws/sagemaker-python-sdk.
This function downloads this compressed file, transforms it as a module, and executes it.
Args:
uri (str): the location of the module.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
name (str): name of the script or module.
cache (bool): If True it will avoid downloading the module again, if already installed.
wait (bool): If True run_module will wait for the user module to exit and check the exit code,
otherwise it will launch the user module with subprocess and return the process object.
"""
_warning_cache_deprecation(cache)
env_vars = env_vars or {}
env_vars = env_vars.copy()
_files.download_and_extract(uri, name, _env.code_dir)
prepare(_env.code_dir, name)
install(_env.code_dir)
_env.write_env_vars(env_vars)
return run(name, args, env_vars, wait, capture_error) | [
"def",
"run_module",
"(",
"uri",
",",
"args",
",",
"env_vars",
"=",
"None",
",",
"name",
"=",
"DEFAULT_MODULE_NAME",
",",
"cache",
"=",
"None",
",",
"wait",
"=",
"True",
",",
"capture_error",
"=",
"False",
")",
":",
"# type: (str, list, dict, str, bool, bool, ... | Download, prepare and executes a compressed tar file from S3 or provided directory as a module.
SageMaker Python SDK saves the user provided scripts as compressed tar files in S3
https://github.com/aws/sagemaker-python-sdk.
This function downloads this compressed file, transforms it as a module, and executes it.
Args:
uri (str): the location of the module.
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written.
name (str): name of the script or module.
cache (bool): If True it will avoid downloading the module again, if already installed.
wait (bool): If True run_module will wait for the user module to exit and check the exit code,
otherwise it will launch the user module with subprocess and return the process object. | [
"Download",
"prepare",
"and",
"executes",
"a",
"compressed",
"tar",
"file",
"from",
"S3",
"or",
"provided",
"directory",
"as",
"a",
"module",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_modules.py#L254-L281 | train | 228,215 |
aws/sagemaker-containers | src/sagemaker_containers/_worker.py | Request.content_type | def content_type(self): # type: () -> str
"""The request's content-type.
Returns:
(str): The value, if any, of the header 'ContentType' (used by some AWS services) and 'Content-Type'.
Otherwise, returns 'application/json' as default.
"""
# todo(mvsusp): consider a better default content-type
return self.headers.get('ContentType') or self.headers.get(
'Content-Type') or _content_types.JSON | python | def content_type(self): # type: () -> str
"""The request's content-type.
Returns:
(str): The value, if any, of the header 'ContentType' (used by some AWS services) and 'Content-Type'.
Otherwise, returns 'application/json' as default.
"""
# todo(mvsusp): consider a better default content-type
return self.headers.get('ContentType') or self.headers.get(
'Content-Type') or _content_types.JSON | [
"def",
"content_type",
"(",
"self",
")",
":",
"# type: () -> str",
"# todo(mvsusp): consider a better default content-type",
"return",
"self",
".",
"headers",
".",
"get",
"(",
"'ContentType'",
")",
"or",
"self",
".",
"headers",
".",
"get",
"(",
"'Content-Type'",
")"... | The request's content-type.
Returns:
(str): The value, if any, of the header 'ContentType' (used by some AWS services) and 'Content-Type'.
Otherwise, returns 'application/json' as default. | [
"The",
"request",
"s",
"content",
"-",
"type",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_worker.py#L135-L144 | train | 228,216 |
aws/sagemaker-containers | src/sagemaker_containers/_worker.py | Request.accept | def accept(self): # type: () -> str
"""The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable.
"""
accept = self.headers.get('Accept')
if not accept or accept == _content_types.ANY:
return self._default_accept
else:
return accept | python | def accept(self): # type: () -> str
"""The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable.
"""
accept = self.headers.get('Accept')
if not accept or accept == _content_types.ANY:
return self._default_accept
else:
return accept | [
"def",
"accept",
"(",
"self",
")",
":",
"# type: () -> str",
"accept",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'Accept'",
")",
"if",
"not",
"accept",
"or",
"accept",
"==",
"_content_types",
".",
"ANY",
":",
"return",
"self",
".",
"_default_accept",
... | The content-type for the response to the client.
Returns:
(str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT
environment variable. | [
"The",
"content",
"-",
"type",
"for",
"the",
"response",
"to",
"the",
"client",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_worker.py#L147-L159 | train | 228,217 |
aws/sagemaker-containers | src/sagemaker_containers/_worker.py | Request.content | def content(self): # type: () -> object
"""The request incoming data.
It automatic decodes from utf-8
Returns:
(obj): incoming data
"""
as_text = self.content_type in _content_types.UTF8_TYPES
return self.get_data(as_text=as_text) | python | def content(self): # type: () -> object
"""The request incoming data.
It automatic decodes from utf-8
Returns:
(obj): incoming data
"""
as_text = self.content_type in _content_types.UTF8_TYPES
return self.get_data(as_text=as_text) | [
"def",
"content",
"(",
"self",
")",
":",
"# type: () -> object",
"as_text",
"=",
"self",
".",
"content_type",
"in",
"_content_types",
".",
"UTF8_TYPES",
"return",
"self",
".",
"get_data",
"(",
"as_text",
"=",
"as_text",
")"
] | The request incoming data.
It automatic decodes from utf-8
Returns:
(obj): incoming data | [
"The",
"request",
"incoming",
"data",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_worker.py#L162-L172 | train | 228,218 |
aws/sagemaker-containers | src/sagemaker_containers/entry_point.py | run | def run(uri,
user_entry_point,
args,
env_vars=None,
wait=True,
capture_error=False,
runner=_runner.ProcessRunnerType,
extra_opts=None):
# type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None
"""Download, prepare and executes a compressed tar file from S3 or provided directory as an user
entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command
arguments.
If the entry point is:
- A Python package: executes the packages as >>> env_vars python -m module_name + args
- A Python script: executes the script as >>> env_vars python module_name + args
- Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import entry_point
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>entry_point.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
uri (str): the location of the module.
user_entry_point (str): name of the user provided entry point
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written (default: None).
wait (bool): If the user entry point should be run to completion before this method returns
(default: True).
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to
be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType).
extra_opts (dict): Additional options for running the entry point (default: None).
Currently, this only applies for MPI.
Returns:
sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for
executing the entry point.
"""
env_vars = env_vars or {}
env_vars = env_vars.copy()
_files.download_and_extract(uri, user_entry_point, _env.code_dir)
install(user_entry_point, _env.code_dir, capture_error)
_env.write_env_vars(env_vars)
return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error) | python | def run(uri,
user_entry_point,
args,
env_vars=None,
wait=True,
capture_error=False,
runner=_runner.ProcessRunnerType,
extra_opts=None):
# type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None
"""Download, prepare and executes a compressed tar file from S3 or provided directory as an user
entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command
arguments.
If the entry point is:
- A Python package: executes the packages as >>> env_vars python -m module_name + args
- A Python script: executes the script as >>> env_vars python module_name + args
- Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import entry_point
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>entry_point.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
uri (str): the location of the module.
user_entry_point (str): name of the user provided entry point
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written (default: None).
wait (bool): If the user entry point should be run to completion before this method returns
(default: True).
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to
be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType).
extra_opts (dict): Additional options for running the entry point (default: None).
Currently, this only applies for MPI.
Returns:
sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for
executing the entry point.
"""
env_vars = env_vars or {}
env_vars = env_vars.copy()
_files.download_and_extract(uri, user_entry_point, _env.code_dir)
install(user_entry_point, _env.code_dir, capture_error)
_env.write_env_vars(env_vars)
return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error) | [
"def",
"run",
"(",
"uri",
",",
"user_entry_point",
",",
"args",
",",
"env_vars",
"=",
"None",
",",
"wait",
"=",
"True",
",",
"capture_error",
"=",
"False",
",",
"runner",
"=",
"_runner",
".",
"ProcessRunnerType",
",",
"extra_opts",
"=",
"None",
")",
":",... | Download, prepare and executes a compressed tar file from S3 or provided directory as an user
entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command
arguments.
If the entry point is:
- A Python package: executes the packages as >>> env_vars python -m module_name + args
- A Python script: executes the script as >>> env_vars python module_name + args
- Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args
Example:
>>>import sagemaker_containers
>>>from sagemaker_containers.beta.framework import entry_point
>>>env = sagemaker_containers.training_env()
{'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...}
>>>hyperparameters = env.hyperparameters
{'batch-size': 128, 'model_dir': '/opt/ml/model'}
>>>args = mapping.to_cmd_args(hyperparameters)
['--batch-size', '128', '--model_dir', '/opt/ml/model']
>>>env_vars = mapping.to_env_vars()
['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training',
'MODEL_DIR':'/opt/ml/model', ...}
>>>entry_point.run('user_script', args, env_vars)
SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \
SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model
Args:
uri (str): the location of the module.
user_entry_point (str): name of the user provided entry point
args (list): A list of program arguments.
env_vars (dict): A map containing the environment variables to be written (default: None).
wait (bool): If the user entry point should be run to completion before this method returns
(default: True).
capture_error (bool): Default false. If True, the running process captures the
stderr, and appends it to the returned Exception message in case of errors.
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to
be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType).
extra_opts (dict): Additional options for running the entry point (default: None).
Currently, this only applies for MPI.
Returns:
sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for
executing the entry point. | [
"Download",
"prepare",
"and",
"executes",
"a",
"compressed",
"tar",
"file",
"from",
"S3",
"or",
"provided",
"directory",
"as",
"an",
"user",
"entrypoint",
".",
"Runs",
"the",
"user",
"entry",
"point",
"passing",
"env_vars",
"as",
"environment",
"variables",
"a... | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/entry_point.py#L22-L89 | train | 228,219 |
aws/sagemaker-containers | src/sagemaker_containers/_logging.py | configure_logger | def configure_logger(level, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s'):
# type: (int, str) -> None
"""Set logger configuration.
Args:
level (int): Logger level
format (str): Logger format
"""
logging.basicConfig(format=format, level=level)
if level >= logging.INFO:
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARN) | python | def configure_logger(level, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s'):
# type: (int, str) -> None
"""Set logger configuration.
Args:
level (int): Logger level
format (str): Logger format
"""
logging.basicConfig(format=format, level=level)
if level >= logging.INFO:
logging.getLogger('boto3').setLevel(logging.INFO)
logging.getLogger('s3transfer').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARN) | [
"def",
"configure_logger",
"(",
"level",
",",
"format",
"=",
"'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'",
")",
":",
"# type: (int, str) -> None",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"format",
",",
"level",
"=",
"level",
")",
"if",
"level",
... | Set logger configuration.
Args:
level (int): Logger level
format (str): Logger format | [
"Set",
"logger",
"configuration",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_logging.py#L25-L38 | train | 228,220 |
aws/sagemaker-containers | src/sagemaker_containers/_intermediate_output.py | _timestamp | def _timestamp():
"""Return a timestamp with microsecond precision."""
moment = time.time()
moment_us = repr(moment).split('.')[1]
return time.strftime("%Y-%m-%d-%H-%M-%S-{}".format(moment_us), time.gmtime(moment)) | python | def _timestamp():
"""Return a timestamp with microsecond precision."""
moment = time.time()
moment_us = repr(moment).split('.')[1]
return time.strftime("%Y-%m-%d-%H-%M-%S-{}".format(moment_us), time.gmtime(moment)) | [
"def",
"_timestamp",
"(",
")",
":",
"moment",
"=",
"time",
".",
"time",
"(",
")",
"moment_us",
"=",
"repr",
"(",
"moment",
")",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"return",
"time",
".",
"strftime",
"(",
"\"%Y-%m-%d-%H-%M-%S-{}\"",
".",
"fo... | Return a timestamp with microsecond precision. | [
"Return",
"a",
"timestamp",
"with",
"microsecond",
"precision",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_intermediate_output.py#L36-L40 | train | 228,221 |
aws/sagemaker-containers | src/sagemaker_containers/_mapping.py | split_by_criteria | def split_by_criteria(dictionary, keys=None, prefix=None): # type: (dict, set or list or tuple) -> SplitResultSpec
"""Split a dictionary in two by the provided keys.
Args:
dictionary (dict[str, object]): A Python dictionary
keys (sequence [str]): A sequence of keys which will be added the split criteria
prefix (str): A prefix which will be added the split criteria
Returns:
`SplitResultSpec` : A collections.namedtuple with the following attributes:
* Args:
included (dict[str, object]: A dictionary with the keys included in the criteria.
excluded (dict[str, object]: A dictionary with the keys not included in the criteria.
"""
keys = keys or []
keys = set(keys)
included_items = {k: dictionary[k] for k in dictionary.keys() if k in keys or (prefix and k.startswith(prefix))}
excluded_items = {k: dictionary[k] for k in dictionary.keys() if k not in included_items}
return SplitResultSpec(included=included_items, excluded=excluded_items) | python | def split_by_criteria(dictionary, keys=None, prefix=None): # type: (dict, set or list or tuple) -> SplitResultSpec
"""Split a dictionary in two by the provided keys.
Args:
dictionary (dict[str, object]): A Python dictionary
keys (sequence [str]): A sequence of keys which will be added the split criteria
prefix (str): A prefix which will be added the split criteria
Returns:
`SplitResultSpec` : A collections.namedtuple with the following attributes:
* Args:
included (dict[str, object]: A dictionary with the keys included in the criteria.
excluded (dict[str, object]: A dictionary with the keys not included in the criteria.
"""
keys = keys or []
keys = set(keys)
included_items = {k: dictionary[k] for k in dictionary.keys() if k in keys or (prefix and k.startswith(prefix))}
excluded_items = {k: dictionary[k] for k in dictionary.keys() if k not in included_items}
return SplitResultSpec(included=included_items, excluded=excluded_items) | [
"def",
"split_by_criteria",
"(",
"dictionary",
",",
"keys",
"=",
"None",
",",
"prefix",
"=",
"None",
")",
":",
"# type: (dict, set or list or tuple) -> SplitResultSpec",
"keys",
"=",
"keys",
"or",
"[",
"]",
"keys",
"=",
"set",
"(",
"keys",
")",
"included_items",... | Split a dictionary in two by the provided keys.
Args:
dictionary (dict[str, object]): A Python dictionary
keys (sequence [str]): A sequence of keys which will be added the split criteria
prefix (str): A prefix which will be added the split criteria
Returns:
`SplitResultSpec` : A collections.namedtuple with the following attributes:
* Args:
included (dict[str, object]: A dictionary with the keys included in the criteria.
excluded (dict[str, object]: A dictionary with the keys not included in the criteria. | [
"Split",
"a",
"dictionary",
"in",
"two",
"by",
"the",
"provided",
"keys",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_mapping.py#L119-L140 | train | 228,222 |
aws/sagemaker-containers | src/sagemaker_containers/_transformer.py | default_output_fn | def default_output_fn(prediction, accept):
"""Function responsible to serialize the prediction for the response.
Args:
prediction (obj): prediction returned by predict_fn .
accept (str): accept content-type expected by the client.
Returns:
(worker.Response): a Flask response object with the following args:
* Args:
response: the serialized data to return
accept: the content-type that the data was transformed to.
"""
return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept) | python | def default_output_fn(prediction, accept):
"""Function responsible to serialize the prediction for the response.
Args:
prediction (obj): prediction returned by predict_fn .
accept (str): accept content-type expected by the client.
Returns:
(worker.Response): a Flask response object with the following args:
* Args:
response: the serialized data to return
accept: the content-type that the data was transformed to.
"""
return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept) | [
"def",
"default_output_fn",
"(",
"prediction",
",",
"accept",
")",
":",
"return",
"_worker",
".",
"Response",
"(",
"response",
"=",
"_encoders",
".",
"encode",
"(",
"prediction",
",",
"accept",
")",
",",
"mimetype",
"=",
"accept",
")"
] | Function responsible to serialize the prediction for the response.
Args:
prediction (obj): prediction returned by predict_fn .
accept (str): accept content-type expected by the client.
Returns:
(worker.Response): a Flask response object with the following args:
* Args:
response: the serialized data to return
accept: the content-type that the data was transformed to. | [
"Function",
"responsible",
"to",
"serialize",
"the",
"prediction",
"for",
"the",
"response",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_transformer.py#L77-L91 | train | 228,223 |
aws/sagemaker-containers | src/sagemaker_containers/_transformer.py | Transformer.transform | def transform(self): # type: () -> _worker.Response
"""Take a request with input data, deserialize it, make a prediction, and return a
serialized response.
Returns:
sagemaker_containers.beta.framework.worker.Response: a Flask response object with
the following args:
* response: the serialized data to return
* accept: the content type that the data was serialized into
"""
request = _worker.Request()
result = self._transform_fn(self._model, request.content, request.content_type, request.accept)
if isinstance(result, tuple):
# transforms tuple in Response for backwards compatibility
return _worker.Response(response=result[0], mimetype=result[1])
return result | python | def transform(self): # type: () -> _worker.Response
"""Take a request with input data, deserialize it, make a prediction, and return a
serialized response.
Returns:
sagemaker_containers.beta.framework.worker.Response: a Flask response object with
the following args:
* response: the serialized data to return
* accept: the content type that the data was serialized into
"""
request = _worker.Request()
result = self._transform_fn(self._model, request.content, request.content_type, request.accept)
if isinstance(result, tuple):
# transforms tuple in Response for backwards compatibility
return _worker.Response(response=result[0], mimetype=result[1])
return result | [
"def",
"transform",
"(",
"self",
")",
":",
"# type: () -> _worker.Response",
"request",
"=",
"_worker",
".",
"Request",
"(",
")",
"result",
"=",
"self",
".",
"_transform_fn",
"(",
"self",
".",
"_model",
",",
"request",
".",
"content",
",",
"request",
".",
... | Take a request with input data, deserialize it, make a prediction, and return a
serialized response.
Returns:
sagemaker_containers.beta.framework.worker.Response: a Flask response object with
the following args:
* response: the serialized data to return
* accept: the content type that the data was serialized into | [
"Take",
"a",
"request",
"with",
"input",
"data",
"deserialize",
"it",
"make",
"a",
"prediction",
"and",
"return",
"a",
"serialized",
"response",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_transformer.py#L159-L177 | train | 228,224 |
aws/sagemaker-containers | src/sagemaker_containers/_transformer.py | Transformer._default_transform_fn | def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE)
return result | python | def _default_transform_fn(self, model, content, content_type, accept):
"""Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type)
"""
try:
data = self._input_fn(content, content_type)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
prediction = self._predict_fn(data, model)
try:
result = self._output_fn(prediction, accept)
except _errors.UnsupportedFormatError as e:
return self._error_response(e, http_client.NOT_ACCEPTABLE)
return result | [
"def",
"_default_transform_fn",
"(",
"self",
",",
"model",
",",
"content",
",",
"content_type",
",",
"accept",
")",
":",
"try",
":",
"data",
"=",
"self",
".",
"_input_fn",
"(",
"content",
",",
"content_type",
")",
"except",
"_errors",
".",
"UnsupportedFormat... | Make predictions against the model and return a serialized response.
This serves as the default implementation of transform_fn, used when the user has not
implemented one themselves.
Args:
model (obj): model loaded by model_fn.
content: request content.
content_type (str): the request Content-Type.
accept (str): accept content-type expected by the client.
Returns:
sagemaker_containers.beta.framework.worker.Response or tuple:
the serialized response data and its content type, either as a Response object or
a tuple of the form (response_data, content_type) | [
"Make",
"predictions",
"against",
"the",
"model",
"and",
"return",
"a",
"serialized",
"response",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_transformer.py#L179-L208 | train | 228,225 |
aws/sagemaker-containers | src/sagemaker_containers/__init__.py | training_env | def training_env(): # type: () -> _env.TrainingEnv
"""Create a TrainingEnv.
Returns:
TrainingEnv: an instance of TrainingEnv
"""
from sagemaker_containers import _env
return _env.TrainingEnv(
resource_config=_env.read_resource_config(),
input_data_config=_env.read_input_data_config(),
hyperparameters=_env.read_hyperparameters()) | python | def training_env(): # type: () -> _env.TrainingEnv
"""Create a TrainingEnv.
Returns:
TrainingEnv: an instance of TrainingEnv
"""
from sagemaker_containers import _env
return _env.TrainingEnv(
resource_config=_env.read_resource_config(),
input_data_config=_env.read_input_data_config(),
hyperparameters=_env.read_hyperparameters()) | [
"def",
"training_env",
"(",
")",
":",
"# type: () -> _env.TrainingEnv",
"from",
"sagemaker_containers",
"import",
"_env",
"return",
"_env",
".",
"TrainingEnv",
"(",
"resource_config",
"=",
"_env",
".",
"read_resource_config",
"(",
")",
",",
"input_data_config",
"=",
... | Create a TrainingEnv.
Returns:
TrainingEnv: an instance of TrainingEnv | [
"Create",
"a",
"TrainingEnv",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/__init__.py#L16-L28 | train | 228,226 |
aws/sagemaker-containers | src/sagemaker_containers/_env.py | _write_json | def _write_json(obj, path): # type: (object, str) -> None
"""Writes a serializeable object as a JSON file"""
with open(path, 'w') as f:
json.dump(obj, f) | python | def _write_json(obj, path): # type: (object, str) -> None
"""Writes a serializeable object as a JSON file"""
with open(path, 'w') as f:
json.dump(obj, f) | [
"def",
"_write_json",
"(",
"obj",
",",
"path",
")",
":",
"# type: (object, str) -> None",
"with",
"open",
"(",
"path",
",",
"'w'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"obj",
",",
"f",
")"
] | Writes a serializeable object as a JSON file | [
"Writes",
"a",
"serializeable",
"object",
"as",
"a",
"JSON",
"file"
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L36-L39 | train | 228,227 |
aws/sagemaker-containers | src/sagemaker_containers/_env.py | _create_training_directories | def _create_training_directories():
"""Creates the directory structure and files necessary for training under the base path
"""
logger.info('Creating a new training folder under %s .' % base_dir)
os.makedirs(model_dir)
os.makedirs(input_config_dir)
os.makedirs(output_data_dir)
_write_json({}, hyperparameters_file_dir)
_write_json({}, input_data_config_file_dir)
host_name = socket.gethostname()
resources_dict = {
"current_host": host_name,
"hosts": [host_name]
}
_write_json(resources_dict, resource_config_file_dir) | python | def _create_training_directories():
"""Creates the directory structure and files necessary for training under the base path
"""
logger.info('Creating a new training folder under %s .' % base_dir)
os.makedirs(model_dir)
os.makedirs(input_config_dir)
os.makedirs(output_data_dir)
_write_json({}, hyperparameters_file_dir)
_write_json({}, input_data_config_file_dir)
host_name = socket.gethostname()
resources_dict = {
"current_host": host_name,
"hosts": [host_name]
}
_write_json(resources_dict, resource_config_file_dir) | [
"def",
"_create_training_directories",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Creating a new training folder under %s .'",
"%",
"base_dir",
")",
"os",
".",
"makedirs",
"(",
"model_dir",
")",
"os",
".",
"makedirs",
"(",
"input_config_dir",
")",
"os",
".",
"... | Creates the directory structure and files necessary for training under the base path | [
"Creates",
"the",
"directory",
"structure",
"and",
"files",
"necessary",
"for",
"training",
"under",
"the",
"base",
"path"
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L150-L168 | train | 228,228 |
aws/sagemaker-containers | src/sagemaker_containers/_env.py | num_gpus | def num_gpus(): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
try:
cmd = shlex.split('nvidia-smi --list-gpus')
output = subprocess.check_output(cmd).decode('utf-8')
return sum([1 for x in output.split('\n') if x.startswith('GPU ')])
except (OSError, subprocess.CalledProcessError):
logger.info('No GPUs detected (normal if no gpus installed)')
return 0 | python | def num_gpus(): # type: () -> int
"""The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container.
"""
try:
cmd = shlex.split('nvidia-smi --list-gpus')
output = subprocess.check_output(cmd).decode('utf-8')
return sum([1 for x in output.split('\n') if x.startswith('GPU ')])
except (OSError, subprocess.CalledProcessError):
logger.info('No GPUs detected (normal if no gpus installed)')
return 0 | [
"def",
"num_gpus",
"(",
")",
":",
"# type: () -> int",
"try",
":",
"cmd",
"=",
"shlex",
".",
"split",
"(",
"'nvidia-smi --list-gpus'",
")",
"output",
"=",
"subprocess",
".",
"check_output",
"(",
"cmd",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"return",
"su... | The number of gpus available in the current container.
Returns:
int: number of gpus available in the current container. | [
"The",
"number",
"of",
"gpus",
"available",
"in",
"the",
"current",
"container",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L285-L297 | train | 228,229 |
aws/sagemaker-containers | src/sagemaker_containers/_env.py | write_env_vars | def write_env_vars(env_vars=None): # type: (dict) -> None
"""Write the dictionary env_vars in the system, as environment variables.
Args:
env_vars ():
Returns:
"""
env_vars = env_vars or {}
env_vars['PYTHONPATH'] = ':'.join(sys.path)
for name, value in env_vars.items():
os.environ[name] = value | python | def write_env_vars(env_vars=None): # type: (dict) -> None
"""Write the dictionary env_vars in the system, as environment variables.
Args:
env_vars ():
Returns:
"""
env_vars = env_vars or {}
env_vars['PYTHONPATH'] = ':'.join(sys.path)
for name, value in env_vars.items():
os.environ[name] = value | [
"def",
"write_env_vars",
"(",
"env_vars",
"=",
"None",
")",
":",
"# type: (dict) -> None",
"env_vars",
"=",
"env_vars",
"or",
"{",
"}",
"env_vars",
"[",
"'PYTHONPATH'",
"]",
"=",
"':'",
".",
"join",
"(",
"sys",
".",
"path",
")",
"for",
"name",
",",
"valu... | Write the dictionary env_vars in the system, as environment variables.
Args:
env_vars ():
Returns: | [
"Write",
"the",
"dictionary",
"env_vars",
"in",
"the",
"system",
"as",
"environment",
"variables",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L925-L938 | train | 228,230 |
aws/sagemaker-containers | src/sagemaker_containers/_env.py | TrainingEnv.to_env_vars | def to_env_vars(self):
"""Environment variable representation of the training environment
Returns:
dict: an instance of dictionary
"""
env = {
'hosts': self.hosts, 'network_interface_name': self.network_interface_name,
'hps': self.hyperparameters, 'user_entry_point': self.user_entry_point,
'framework_params': self.additional_framework_parameters,
'resource_config': self.resource_config, 'input_data_config': self.input_data_config,
'output_data_dir': self.output_data_dir,
'channels': sorted(self.channel_input_dirs.keys()),
'current_host': self.current_host, 'module_name': self.module_name,
'log_level': self.log_level,
'framework_module': self.framework_module, 'input_dir': self.input_dir,
'input_config_dir': self.input_config_dir, 'output_dir': self.output_dir,
'num_cpus': self.num_cpus,
'num_gpus': self.num_gpus, 'model_dir': self.model_dir, 'module_dir': self.module_dir,
'training_env': dict(self), 'user_args': self.to_cmd_args(),
'output_intermediate_dir': self.output_intermediate_dir
}
for name, path in self.channel_input_dirs.items():
env['channel_%s' % name] = path
for key, value in self.hyperparameters.items():
env['hp_%s' % key] = value
return _mapping.to_env_vars(env) | python | def to_env_vars(self):
"""Environment variable representation of the training environment
Returns:
dict: an instance of dictionary
"""
env = {
'hosts': self.hosts, 'network_interface_name': self.network_interface_name,
'hps': self.hyperparameters, 'user_entry_point': self.user_entry_point,
'framework_params': self.additional_framework_parameters,
'resource_config': self.resource_config, 'input_data_config': self.input_data_config,
'output_data_dir': self.output_data_dir,
'channels': sorted(self.channel_input_dirs.keys()),
'current_host': self.current_host, 'module_name': self.module_name,
'log_level': self.log_level,
'framework_module': self.framework_module, 'input_dir': self.input_dir,
'input_config_dir': self.input_config_dir, 'output_dir': self.output_dir,
'num_cpus': self.num_cpus,
'num_gpus': self.num_gpus, 'model_dir': self.model_dir, 'module_dir': self.module_dir,
'training_env': dict(self), 'user_args': self.to_cmd_args(),
'output_intermediate_dir': self.output_intermediate_dir
}
for name, path in self.channel_input_dirs.items():
env['channel_%s' % name] = path
for key, value in self.hyperparameters.items():
env['hp_%s' % key] = value
return _mapping.to_env_vars(env) | [
"def",
"to_env_vars",
"(",
"self",
")",
":",
"env",
"=",
"{",
"'hosts'",
":",
"self",
".",
"hosts",
",",
"'network_interface_name'",
":",
"self",
".",
"network_interface_name",
",",
"'hps'",
":",
"self",
".",
"hyperparameters",
",",
"'user_entry_point'",
":",
... | Environment variable representation of the training environment
Returns:
dict: an instance of dictionary | [
"Environment",
"variable",
"representation",
"of",
"the",
"training",
"environment"
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L641-L671 | train | 228,231 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | array_to_npy | def array_to_npy(array_like): # type: (np.array or Iterable or int or float) -> object
"""Convert an array like object to the NPY format.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to NPY.
Returns:
(obj): NPY array.
"""
buffer = BytesIO()
np.save(buffer, array_like)
return buffer.getvalue() | python | def array_to_npy(array_like): # type: (np.array or Iterable or int or float) -> object
"""Convert an array like object to the NPY format.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to NPY.
Returns:
(obj): NPY array.
"""
buffer = BytesIO()
np.save(buffer, array_like)
return buffer.getvalue() | [
"def",
"array_to_npy",
"(",
"array_like",
")",
":",
"# type: (np.array or Iterable or int or float) -> object",
"buffer",
"=",
"BytesIO",
"(",
")",
"np",
".",
"save",
"(",
"buffer",
",",
"array_like",
")",
"return",
"buffer",
".",
"getvalue",
"(",
")"
] | Convert an array like object to the NPY format.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to NPY.
Returns:
(obj): NPY array. | [
"Convert",
"an",
"array",
"like",
"object",
"to",
"the",
"NPY",
"format",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L24-L38 | train | 228,232 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | npy_to_numpy | def npy_to_numpy(npy_array): # type: (object) -> np.array
"""Convert an NPY array into numpy.
Args:
npy_array (npy array): to be converted to numpy array
Returns:
(np.array): converted numpy array.
"""
stream = BytesIO(npy_array)
return np.load(stream, allow_pickle=True) | python | def npy_to_numpy(npy_array): # type: (object) -> np.array
"""Convert an NPY array into numpy.
Args:
npy_array (npy array): to be converted to numpy array
Returns:
(np.array): converted numpy array.
"""
stream = BytesIO(npy_array)
return np.load(stream, allow_pickle=True) | [
"def",
"npy_to_numpy",
"(",
"npy_array",
")",
":",
"# type: (object) -> np.array",
"stream",
"=",
"BytesIO",
"(",
"npy_array",
")",
"return",
"np",
".",
"load",
"(",
"stream",
",",
"allow_pickle",
"=",
"True",
")"
] | Convert an NPY array into numpy.
Args:
npy_array (npy array): to be converted to numpy array
Returns:
(np.array): converted numpy array. | [
"Convert",
"an",
"NPY",
"array",
"into",
"numpy",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L41-L50 | train | 228,233 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | array_to_json | def array_to_json(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to JSON.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to JSON.
Returns:
(str): object serialized to JSON
"""
def default(_array_like):
if hasattr(_array_like, 'tolist'):
return _array_like.tolist()
return json.JSONEncoder().default(_array_like)
return json.dumps(array_like, default=default) | python | def array_to_json(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to JSON.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to JSON.
Returns:
(str): object serialized to JSON
"""
def default(_array_like):
if hasattr(_array_like, 'tolist'):
return _array_like.tolist()
return json.JSONEncoder().default(_array_like)
return json.dumps(array_like, default=default) | [
"def",
"array_to_json",
"(",
"array_like",
")",
":",
"# type: (np.array or Iterable or int or float) -> str",
"def",
"default",
"(",
"_array_like",
")",
":",
"if",
"hasattr",
"(",
"_array_like",
",",
"'tolist'",
")",
":",
"return",
"_array_like",
".",
"tolist",
"(",... | Convert an array like object to JSON.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to JSON.
Returns:
(str): object serialized to JSON | [
"Convert",
"an",
"array",
"like",
"object",
"to",
"JSON",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L53-L71 | train | 228,234 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | json_to_numpy | def json_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
data = json.loads(string_like)
return np.array(data, dtype=dtype) | python | def json_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
data = json.loads(string_like)
return np.array(data, dtype=dtype) | [
"def",
"json_to_numpy",
"(",
"string_like",
",",
"dtype",
"=",
"None",
")",
":",
"# type: (str) -> np.array",
"data",
"=",
"json",
".",
"loads",
"(",
"string_like",
")",
"return",
"np",
".",
"array",
"(",
"data",
",",
"dtype",
"=",
"dtype",
")"
] | Convert a JSON object to a numpy array.
Args:
string_like (str): JSON string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array | [
"Convert",
"a",
"JSON",
"object",
"to",
"a",
"numpy",
"array",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L74-L86 | train | 228,235 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | csv_to_numpy | def csv_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a CSV object to a numpy array.
Args:
string_like (str): CSV string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
stream = StringIO(string_like)
return np.genfromtxt(stream, dtype=dtype, delimiter=',') | python | def csv_to_numpy(string_like, dtype=None): # type: (str) -> np.array
"""Convert a CSV object to a numpy array.
Args:
string_like (str): CSV string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array
"""
stream = StringIO(string_like)
return np.genfromtxt(stream, dtype=dtype, delimiter=',') | [
"def",
"csv_to_numpy",
"(",
"string_like",
",",
"dtype",
"=",
"None",
")",
":",
"# type: (str) -> np.array",
"stream",
"=",
"StringIO",
"(",
"string_like",
")",
"return",
"np",
".",
"genfromtxt",
"(",
"stream",
",",
"dtype",
"=",
"dtype",
",",
"delimiter",
"... | Convert a CSV object to a numpy array.
Args:
string_like (str): CSV string.
dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the
contents of each column, individually. This argument can only be used to
'upcast' the array. For downcasting, use the .astype(t) method.
Returns:
(np.array): numpy array | [
"Convert",
"a",
"CSV",
"object",
"to",
"a",
"numpy",
"array",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L89-L101 | train | 228,236 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | array_to_csv | def array_to_csv(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV
"""
stream = StringIO()
np.savetxt(stream, array_like, delimiter=',', fmt='%s')
return stream.getvalue() | python | def array_to_csv(array_like): # type: (np.array or Iterable or int or float) -> str
"""Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV
"""
stream = StringIO()
np.savetxt(stream, array_like, delimiter=',', fmt='%s')
return stream.getvalue() | [
"def",
"array_to_csv",
"(",
"array_like",
")",
":",
"# type: (np.array or Iterable or int or float) -> str",
"stream",
"=",
"StringIO",
"(",
")",
"np",
".",
"savetxt",
"(",
"stream",
",",
"array_like",
",",
"delimiter",
"=",
"','",
",",
"fmt",
"=",
"'%s'",
")",
... | Convert an array like object to CSV.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): array like object to be converted to CSV.
Returns:
(str): object serialized to CSV | [
"Convert",
"an",
"array",
"like",
"object",
"to",
"CSV",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L104-L118 | train | 228,237 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | decode | def decode(obj, content_type):
# type: (np.array or Iterable or int or float, str) -> np.array
"""Decode an object ton a one of the default content types to a numpy array.
Args:
obj (object): to be decoded.
content_type (str): content type to be used.
Returns:
np.array: decoded object.
"""
try:
decoder = _decoders_map[content_type]
return decoder(obj)
except KeyError:
raise _errors.UnsupportedFormatError(content_type) | python | def decode(obj, content_type):
# type: (np.array or Iterable or int or float, str) -> np.array
"""Decode an object ton a one of the default content types to a numpy array.
Args:
obj (object): to be decoded.
content_type (str): content type to be used.
Returns:
np.array: decoded object.
"""
try:
decoder = _decoders_map[content_type]
return decoder(obj)
except KeyError:
raise _errors.UnsupportedFormatError(content_type) | [
"def",
"decode",
"(",
"obj",
",",
"content_type",
")",
":",
"# type: (np.array or Iterable or int or float, str) -> np.array",
"try",
":",
"decoder",
"=",
"_decoders_map",
"[",
"content_type",
"]",
"return",
"decoder",
"(",
"obj",
")",
"except",
"KeyError",
":",
"ra... | Decode an object ton a one of the default content types to a numpy array.
Args:
obj (object): to be decoded.
content_type (str): content type to be used.
Returns:
np.array: decoded object. | [
"Decode",
"an",
"object",
"ton",
"a",
"one",
"of",
"the",
"default",
"content",
"types",
"to",
"a",
"numpy",
"array",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L125-L140 | train | 228,238 |
aws/sagemaker-containers | src/sagemaker_containers/_encoders.py | encode | def encode(array_like, content_type):
# type: (np.array or Iterable or int or float, str) -> np.array
"""Encode an array like object in a specific content_type to a numpy array.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): to be converted to numpy.
content_type (str): content type to be used.
Returns:
(np.array): object converted as numpy array.
"""
try:
encoder = _encoders_map[content_type]
return encoder(array_like)
except KeyError:
raise _errors.UnsupportedFormatError(content_type) | python | def encode(array_like, content_type):
# type: (np.array or Iterable or int or float, str) -> np.array
"""Encode an array like object in a specific content_type to a numpy array.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): to be converted to numpy.
content_type (str): content type to be used.
Returns:
(np.array): object converted as numpy array.
"""
try:
encoder = _encoders_map[content_type]
return encoder(array_like)
except KeyError:
raise _errors.UnsupportedFormatError(content_type) | [
"def",
"encode",
"(",
"array_like",
",",
"content_type",
")",
":",
"# type: (np.array or Iterable or int or float, str) -> np.array",
"try",
":",
"encoder",
"=",
"_encoders_map",
"[",
"content_type",
"]",
"return",
"encoder",
"(",
"array_like",
")",
"except",
"KeyError"... | Encode an array like object in a specific content_type to a numpy array.
To understand better what an array like object is see:
https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
Args:
array_like (np.array or Iterable or int or float): to be converted to numpy.
content_type (str): content type to be used.
Returns:
(np.array): object converted as numpy array. | [
"Encode",
"an",
"array",
"like",
"object",
"in",
"a",
"specific",
"content_type",
"to",
"a",
"numpy",
"array",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_encoders.py#L143-L161 | train | 228,239 |
aws/sagemaker-containers | src/sagemaker_containers/_files.py | tmpdir | def tmpdir(suffix='', prefix='tmp', dir=None): # type: (str, str, str) -> None
"""Create a temporary directory with a context manager. The file is deleted when the context exits.
The prefix, suffix, and dir arguments are the same as for mkstemp().
Args:
suffix (str): If suffix is specified, the file name will end with that suffix, otherwise there will be no
suffix.
prefix (str): If prefix is specified, the file name will begin with that prefix; otherwise,
a default prefix is used.
dir (str): If dir is specified, the file will be created in that directory; otherwise, a default directory is
used.
Returns:
str: path to the directory
"""
tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
yield tmp
shutil.rmtree(tmp) | python | def tmpdir(suffix='', prefix='tmp', dir=None): # type: (str, str, str) -> None
"""Create a temporary directory with a context manager. The file is deleted when the context exits.
The prefix, suffix, and dir arguments are the same as for mkstemp().
Args:
suffix (str): If suffix is specified, the file name will end with that suffix, otherwise there will be no
suffix.
prefix (str): If prefix is specified, the file name will begin with that prefix; otherwise,
a default prefix is used.
dir (str): If dir is specified, the file will be created in that directory; otherwise, a default directory is
used.
Returns:
str: path to the directory
"""
tmp = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir)
yield tmp
shutil.rmtree(tmp) | [
"def",
"tmpdir",
"(",
"suffix",
"=",
"''",
",",
"prefix",
"=",
"'tmp'",
",",
"dir",
"=",
"None",
")",
":",
"# type: (str, str, str) -> None",
"tmp",
"=",
"tempfile",
".",
"mkdtemp",
"(",
"suffix",
"=",
"suffix",
",",
"prefix",
"=",
"prefix",
",",
"dir",
... | Create a temporary directory with a context manager. The file is deleted when the context exits.
The prefix, suffix, and dir arguments are the same as for mkstemp().
Args:
suffix (str): If suffix is specified, the file name will end with that suffix, otherwise there will be no
suffix.
prefix (str): If prefix is specified, the file name will begin with that prefix; otherwise,
a default prefix is used.
dir (str): If dir is specified, the file will be created in that directory; otherwise, a default directory is
used.
Returns:
str: path to the directory | [
"Create",
"a",
"temporary",
"directory",
"with",
"a",
"context",
"manager",
".",
"The",
"file",
"is",
"deleted",
"when",
"the",
"context",
"exits",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_files.py#L50-L67 | train | 228,240 |
aws/sagemaker-containers | src/sagemaker_containers/_files.py | download_and_extract | def download_and_extract(uri, name, path): # type: (str, str, str) -> None
"""Download, prepare and install a compressed tar file from S3 or local directory as an entry point.
SageMaker Python SDK saves the user provided entry points as compressed tar files in S3
Args:
name (str): name of the entry point.
uri (str): the location of the entry point.
path (bool): The path where the script will be installed. It will not download and install the
if the path already has the user entry point.
"""
if not os.path.exists(path):
os.makedirs(path)
if not os.listdir(path):
with tmpdir() as tmp:
if uri.startswith('s3://'):
dst = os.path.join(tmp, 'tar_file')
s3_download(uri, dst)
with tarfile.open(name=dst, mode='r:gz') as t:
t.extractall(path=path)
elif os.path.isdir(uri):
if uri == path:
return
if os.path.exists(path):
shutil.rmtree(path)
shutil.move(uri, path)
else:
shutil.copy2(uri, os.path.join(path, name)) | python | def download_and_extract(uri, name, path): # type: (str, str, str) -> None
"""Download, prepare and install a compressed tar file from S3 or local directory as an entry point.
SageMaker Python SDK saves the user provided entry points as compressed tar files in S3
Args:
name (str): name of the entry point.
uri (str): the location of the entry point.
path (bool): The path where the script will be installed. It will not download and install the
if the path already has the user entry point.
"""
if not os.path.exists(path):
os.makedirs(path)
if not os.listdir(path):
with tmpdir() as tmp:
if uri.startswith('s3://'):
dst = os.path.join(tmp, 'tar_file')
s3_download(uri, dst)
with tarfile.open(name=dst, mode='r:gz') as t:
t.extractall(path=path)
elif os.path.isdir(uri):
if uri == path:
return
if os.path.exists(path):
shutil.rmtree(path)
shutil.move(uri, path)
else:
shutil.copy2(uri, os.path.join(path, name)) | [
"def",
"download_and_extract",
"(",
"uri",
",",
"name",
",",
"path",
")",
":",
"# type: (str, str, str) -> None",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"if",
"not",
"os",
".",
"li... | Download, prepare and install a compressed tar file from S3 or local directory as an entry point.
SageMaker Python SDK saves the user provided entry points as compressed tar files in S3
Args:
name (str): name of the entry point.
uri (str): the location of the entry point.
path (bool): The path where the script will be installed. It will not download and install the
if the path already has the user entry point. | [
"Download",
"prepare",
"and",
"install",
"a",
"compressed",
"tar",
"file",
"from",
"S3",
"or",
"local",
"directory",
"as",
"an",
"entry",
"point",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_files.py#L108-L137 | train | 228,241 |
aws/sagemaker-containers | src/sagemaker_containers/_files.py | s3_download | def s3_download(url, dst): # type: (str, str) -> None
"""Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved.
"""
url = parse.urlparse(url)
if url.scheme != 's3':
raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))
bucket, key = url.netloc, url.path.lstrip('/')
region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
s3 = boto3.resource('s3', region_name=region)
s3.Bucket(bucket).download_file(key, dst) | python | def s3_download(url, dst): # type: (str, str) -> None
"""Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved.
"""
url = parse.urlparse(url)
if url.scheme != 's3':
raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))
bucket, key = url.netloc, url.path.lstrip('/')
region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
s3 = boto3.resource('s3', region_name=region)
s3.Bucket(bucket).download_file(key, dst) | [
"def",
"s3_download",
"(",
"url",
",",
"dst",
")",
":",
"# type: (str, str) -> None",
"url",
"=",
"parse",
".",
"urlparse",
"(",
"url",
")",
"if",
"url",
".",
"scheme",
"!=",
"'s3'",
":",
"raise",
"ValueError",
"(",
"\"Expecting 's3' scheme, got: %s in %s\"",
... | Download a file from S3.
Args:
url (str): the s3 url of the file.
dst (str): the destination where the file will be saved. | [
"Download",
"a",
"file",
"from",
"S3",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_files.py#L140-L157 | train | 228,242 |
aws/sagemaker-containers | src/sagemaker_containers/_functions.py | matching_args | def matching_args(fn, dictionary): # type: (Callable, _mapping.Mapping) -> dict
"""Given a function fn and a dict dictionary, returns the function arguments that match the dict keys.
Example:
def train(channel_dirs, model_dir): pass
dictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}
args = functions.matching_args(train, dictionary) # {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}
train(**args)
Args:
fn (function): a function
dictionary (dict): the dictionary with the keys
Returns:
(dict) a dictionary with only matching arguments.
"""
arg_spec = getargspec(fn)
if arg_spec.keywords:
return dictionary
return _mapping.split_by_criteria(dictionary, arg_spec.args).included | python | def matching_args(fn, dictionary): # type: (Callable, _mapping.Mapping) -> dict
"""Given a function fn and a dict dictionary, returns the function arguments that match the dict keys.
Example:
def train(channel_dirs, model_dir): pass
dictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}
args = functions.matching_args(train, dictionary) # {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}
train(**args)
Args:
fn (function): a function
dictionary (dict): the dictionary with the keys
Returns:
(dict) a dictionary with only matching arguments.
"""
arg_spec = getargspec(fn)
if arg_spec.keywords:
return dictionary
return _mapping.split_by_criteria(dictionary, arg_spec.args).included | [
"def",
"matching_args",
"(",
"fn",
",",
"dictionary",
")",
":",
"# type: (Callable, _mapping.Mapping) -> dict",
"arg_spec",
"=",
"getargspec",
"(",
"fn",
")",
"if",
"arg_spec",
".",
"keywords",
":",
"return",
"dictionary",
"return",
"_mapping",
".",
"split_by_criter... | Given a function fn and a dict dictionary, returns the function arguments that match the dict keys.
Example:
def train(channel_dirs, model_dir): pass
dictionary = {'channel_dirs': {}, 'model_dir': '/opt/ml/model', 'other_args': None}
args = functions.matching_args(train, dictionary) # {'channel_dirs': {}, 'model_dir': '/opt/ml/model'}
train(**args)
Args:
fn (function): a function
dictionary (dict): the dictionary with the keys
Returns:
(dict) a dictionary with only matching arguments. | [
"Given",
"a",
"function",
"fn",
"and",
"a",
"dict",
"dictionary",
"returns",
"the",
"function",
"arguments",
"that",
"match",
"the",
"dict",
"keys",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_functions.py#L24-L48 | train | 228,243 |
aws/sagemaker-containers | src/sagemaker_containers/_functions.py | error_wrapper | def error_wrapper(fn, error_class): # type: (Callable or None, Exception) -> ...
"""Wraps function fn in a try catch block that re-raises error_class.
Args:
fn (function): function to wrapped
error_class (Exception): Error class to be re-raised
Returns:
(object): fn wrapped in a try catch.
"""
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
six.reraise(error_class, error_class(e), sys.exc_info()[2])
return wrapper | python | def error_wrapper(fn, error_class): # type: (Callable or None, Exception) -> ...
"""Wraps function fn in a try catch block that re-raises error_class.
Args:
fn (function): function to wrapped
error_class (Exception): Error class to be re-raised
Returns:
(object): fn wrapped in a try catch.
"""
def wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
six.reraise(error_class, error_class(e), sys.exc_info()[2])
return wrapper | [
"def",
"error_wrapper",
"(",
"fn",
",",
"error_class",
")",
":",
"# type: (Callable or None, Exception) -> ...",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
... | Wraps function fn in a try catch block that re-raises error_class.
Args:
fn (function): function to wrapped
error_class (Exception): Error class to be re-raised
Returns:
(object): fn wrapped in a try catch. | [
"Wraps",
"function",
"fn",
"in",
"a",
"try",
"catch",
"block",
"that",
"re",
"-",
"raises",
"error_class",
"."
] | 0030f07abbaf22a55d986d97274d7a8d1aa1f10c | https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_functions.py#L73-L89 | train | 228,244 |
ceph/ceph-deploy | ceph_deploy/util/packages.py | ceph_is_installed | def ceph_is_installed(module):
"""
A helper callback to be executed after the connection is made to ensure
that Ceph is installed.
"""
ceph_package = Ceph(module.conn)
if not ceph_package.installed:
host = module.conn.hostname
raise RuntimeError(
'ceph needs to be installed in remote host: %s' % host
) | python | def ceph_is_installed(module):
"""
A helper callback to be executed after the connection is made to ensure
that Ceph is installed.
"""
ceph_package = Ceph(module.conn)
if not ceph_package.installed:
host = module.conn.hostname
raise RuntimeError(
'ceph needs to be installed in remote host: %s' % host
) | [
"def",
"ceph_is_installed",
"(",
"module",
")",
":",
"ceph_package",
"=",
"Ceph",
"(",
"module",
".",
"conn",
")",
"if",
"not",
"ceph_package",
".",
"installed",
":",
"host",
"=",
"module",
".",
"conn",
".",
"hostname",
"raise",
"RuntimeError",
"(",
"'ceph... | A helper callback to be executed after the connection is made to ensure
that Ceph is installed. | [
"A",
"helper",
"callback",
"to",
"be",
"executed",
"after",
"the",
"connection",
"is",
"made",
"to",
"ensure",
"that",
"Ceph",
"is",
"installed",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/packages.py#L64-L74 | train | 228,245 |
ceph/ceph-deploy | ceph_deploy/util/log.py | color_format | def color_format():
"""
Main entry point to get a colored formatter, it will use the
BASE_FORMAT by default and fall back to no colors if the system
does not support it
"""
str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT
color_format = color_message(str_format)
return ColoredFormatter(color_format) | python | def color_format():
"""
Main entry point to get a colored formatter, it will use the
BASE_FORMAT by default and fall back to no colors if the system
does not support it
"""
str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT
color_format = color_message(str_format)
return ColoredFormatter(color_format) | [
"def",
"color_format",
"(",
")",
":",
"str_format",
"=",
"BASE_COLOR_FORMAT",
"if",
"supports_color",
"(",
")",
"else",
"BASE_FORMAT",
"color_format",
"=",
"color_message",
"(",
"str_format",
")",
"return",
"ColoredFormatter",
"(",
"color_format",
")"
] | Main entry point to get a colored formatter, it will use the
BASE_FORMAT by default and fall back to no colors if the system
does not support it | [
"Main",
"entry",
"point",
"to",
"get",
"a",
"colored",
"formatter",
"it",
"will",
"use",
"the",
"BASE_FORMAT",
"by",
"default",
"and",
"fall",
"back",
"to",
"no",
"colors",
"if",
"the",
"system",
"does",
"not",
"support",
"it"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/log.py#L59-L67 | train | 228,246 |
ceph/ceph-deploy | ceph_deploy/mon.py | mon_status_check | def mon_status_check(conn, logger, hostname, args):
"""
A direct check for JSON output on the monitor status.
For newer versions of Ceph (dumpling and newer) a new mon_status command
was added ( `ceph daemon mon mon_status` ) and should be revisited if the
output changes as this check depends on that availability.
"""
asok_path = paths.mon.asok(args.cluster, hostname)
out, err, code = remoto.process.check(
conn,
[
'ceph',
'--cluster={cluster}'.format(cluster=args.cluster),
'--admin-daemon',
asok_path,
'mon_status',
],
)
for line in err:
logger.error(line)
try:
return json.loads(b''.join(out).decode('utf-8'))
except ValueError:
return {} | python | def mon_status_check(conn, logger, hostname, args):
"""
A direct check for JSON output on the monitor status.
For newer versions of Ceph (dumpling and newer) a new mon_status command
was added ( `ceph daemon mon mon_status` ) and should be revisited if the
output changes as this check depends on that availability.
"""
asok_path = paths.mon.asok(args.cluster, hostname)
out, err, code = remoto.process.check(
conn,
[
'ceph',
'--cluster={cluster}'.format(cluster=args.cluster),
'--admin-daemon',
asok_path,
'mon_status',
],
)
for line in err:
logger.error(line)
try:
return json.loads(b''.join(out).decode('utf-8'))
except ValueError:
return {} | [
"def",
"mon_status_check",
"(",
"conn",
",",
"logger",
",",
"hostname",
",",
"args",
")",
":",
"asok_path",
"=",
"paths",
".",
"mon",
".",
"asok",
"(",
"args",
".",
"cluster",
",",
"hostname",
")",
"out",
",",
"err",
",",
"code",
"=",
"remoto",
".",
... | A direct check for JSON output on the monitor status.
For newer versions of Ceph (dumpling and newer) a new mon_status command
was added ( `ceph daemon mon mon_status` ) and should be revisited if the
output changes as this check depends on that availability. | [
"A",
"direct",
"check",
"for",
"JSON",
"output",
"on",
"the",
"monitor",
"status",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L21-L49 | train | 228,247 |
ceph/ceph-deploy | ceph_deploy/mon.py | catch_mon_errors | def catch_mon_errors(conn, logger, hostname, cfg, args):
"""
Make sure we are able to catch up common mishaps with monitors
and use that state of a monitor to determine what is missing
and warn apropriately about it.
"""
monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
mon_initial_members = get_mon_initial_members(args, _cfg=cfg)
public_addr = cfg.safe_get('global', 'public_addr')
public_network = cfg.safe_get('global', 'public_network')
mon_in_monmap = [
mon.get('name')
for mon in monmap.get('mons', [{}])
if mon.get('name') == hostname
]
if mon_initial_members is None or not hostname in mon_initial_members:
logger.warning('%s is not defined in `mon initial members`', hostname)
if not mon_in_monmap:
logger.warning('monitor %s does not exist in monmap', hostname)
if not public_addr and not public_network:
logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
logger.warning('monitors may not be able to form quorum') | python | def catch_mon_errors(conn, logger, hostname, cfg, args):
"""
Make sure we are able to catch up common mishaps with monitors
and use that state of a monitor to determine what is missing
and warn apropriately about it.
"""
monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
mon_initial_members = get_mon_initial_members(args, _cfg=cfg)
public_addr = cfg.safe_get('global', 'public_addr')
public_network = cfg.safe_get('global', 'public_network')
mon_in_monmap = [
mon.get('name')
for mon in monmap.get('mons', [{}])
if mon.get('name') == hostname
]
if mon_initial_members is None or not hostname in mon_initial_members:
logger.warning('%s is not defined in `mon initial members`', hostname)
if not mon_in_monmap:
logger.warning('monitor %s does not exist in monmap', hostname)
if not public_addr and not public_network:
logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
logger.warning('monitors may not be able to form quorum') | [
"def",
"catch_mon_errors",
"(",
"conn",
",",
"logger",
",",
"hostname",
",",
"cfg",
",",
"args",
")",
":",
"monmap",
"=",
"mon_status_check",
"(",
"conn",
",",
"logger",
",",
"hostname",
",",
"args",
")",
".",
"get",
"(",
"'monmap'",
",",
"{",
"}",
"... | Make sure we are able to catch up common mishaps with monitors
and use that state of a monitor to determine what is missing
and warn apropriately about it. | [
"Make",
"sure",
"we",
"are",
"able",
"to",
"catch",
"up",
"common",
"mishaps",
"with",
"monitors",
"and",
"use",
"that",
"state",
"of",
"a",
"monitor",
"to",
"determine",
"what",
"is",
"missing",
"and",
"warn",
"apropriately",
"about",
"it",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L52-L73 | train | 228,248 |
ceph/ceph-deploy | ceph_deploy/mon.py | mon_status | def mon_status(conn, logger, hostname, args, silent=False):
"""
run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
not only the output, but be able to return a boolean status of what is
going on.
``False`` represents a monitor that is not doing OK even if it is up and
running, while ``True`` would mean the monitor is up and running correctly.
"""
mon = 'mon.%s' % hostname
try:
out = mon_status_check(conn, logger, hostname, args)
if not out:
logger.warning('monitor: %s, might not be running yet' % mon)
return False
if not silent:
logger.debug('*'*80)
logger.debug('status for monitor: %s' % mon)
for line in json.dumps(out, indent=2, sort_keys=True).split('\n'):
logger.debug(line)
logger.debug('*'*80)
if out['rank'] >= 0:
logger.info('monitor: %s is running' % mon)
return True
if out['rank'] == -1 and out['state']:
logger.info('monitor: %s is currently at the state of %s' % (mon, out['state']))
return True
logger.info('monitor: %s is not running' % mon)
return False
except RuntimeError:
logger.info('monitor: %s is not running' % mon)
return False | python | def mon_status(conn, logger, hostname, args, silent=False):
"""
run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
not only the output, but be able to return a boolean status of what is
going on.
``False`` represents a monitor that is not doing OK even if it is up and
running, while ``True`` would mean the monitor is up and running correctly.
"""
mon = 'mon.%s' % hostname
try:
out = mon_status_check(conn, logger, hostname, args)
if not out:
logger.warning('monitor: %s, might not be running yet' % mon)
return False
if not silent:
logger.debug('*'*80)
logger.debug('status for monitor: %s' % mon)
for line in json.dumps(out, indent=2, sort_keys=True).split('\n'):
logger.debug(line)
logger.debug('*'*80)
if out['rank'] >= 0:
logger.info('monitor: %s is running' % mon)
return True
if out['rank'] == -1 and out['state']:
logger.info('monitor: %s is currently at the state of %s' % (mon, out['state']))
return True
logger.info('monitor: %s is not running' % mon)
return False
except RuntimeError:
logger.info('monitor: %s is not running' % mon)
return False | [
"def",
"mon_status",
"(",
"conn",
",",
"logger",
",",
"hostname",
",",
"args",
",",
"silent",
"=",
"False",
")",
":",
"mon",
"=",
"'mon.%s'",
"%",
"hostname",
"try",
":",
"out",
"=",
"mon_status_check",
"(",
"conn",
",",
"logger",
",",
"hostname",
",",... | run ``ceph daemon mon.`hostname` mon_status`` on the remote end and provide
not only the output, but be able to return a boolean status of what is
going on.
``False`` represents a monitor that is not doing OK even if it is up and
running, while ``True`` would mean the monitor is up and running correctly. | [
"run",
"ceph",
"daemon",
"mon",
".",
"hostname",
"mon_status",
"on",
"the",
"remote",
"end",
"and",
"provide",
"not",
"only",
"the",
"output",
"but",
"be",
"able",
"to",
"return",
"a",
"boolean",
"status",
"of",
"what",
"is",
"going",
"on",
".",
"False",... | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L76-L108 | train | 228,249 |
ceph/ceph-deploy | ceph_deploy/mon.py | hostname_is_compatible | def hostname_is_compatible(conn, logger, provided_hostname):
"""
Make sure that the host that we are connecting to has the same value as the
`hostname` in the remote host, otherwise mons can fail not reaching quorum.
"""
logger.debug('determining if provided host has same hostname in remote')
remote_hostname = conn.remote_module.shortname()
if remote_hostname == provided_hostname:
return
logger.warning('*'*80)
logger.warning('provided hostname must match remote hostname')
logger.warning('provided hostname: %s' % provided_hostname)
logger.warning('remote hostname: %s' % remote_hostname)
logger.warning('monitors may not reach quorum and create-keys will not complete')
logger.warning('*'*80) | python | def hostname_is_compatible(conn, logger, provided_hostname):
"""
Make sure that the host that we are connecting to has the same value as the
`hostname` in the remote host, otherwise mons can fail not reaching quorum.
"""
logger.debug('determining if provided host has same hostname in remote')
remote_hostname = conn.remote_module.shortname()
if remote_hostname == provided_hostname:
return
logger.warning('*'*80)
logger.warning('provided hostname must match remote hostname')
logger.warning('provided hostname: %s' % provided_hostname)
logger.warning('remote hostname: %s' % remote_hostname)
logger.warning('monitors may not reach quorum and create-keys will not complete')
logger.warning('*'*80) | [
"def",
"hostname_is_compatible",
"(",
"conn",
",",
"logger",
",",
"provided_hostname",
")",
":",
"logger",
".",
"debug",
"(",
"'determining if provided host has same hostname in remote'",
")",
"remote_hostname",
"=",
"conn",
".",
"remote_module",
".",
"shortname",
"(",
... | Make sure that the host that we are connecting to has the same value as the
`hostname` in the remote host, otherwise mons can fail not reaching quorum. | [
"Make",
"sure",
"that",
"the",
"host",
"that",
"we",
"are",
"connecting",
"to",
"has",
"the",
"same",
"value",
"as",
"the",
"hostname",
"in",
"the",
"remote",
"host",
"otherwise",
"mons",
"can",
"fail",
"not",
"reaching",
"quorum",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L290-L304 | train | 228,250 |
ceph/ceph-deploy | ceph_deploy/mon.py | make | def make(parser):
"""
Ceph MON Daemon management
"""
parser.formatter_class = ToggleRawTextHelpFormatter
mon_parser = parser.add_subparsers(dest='subcommand')
mon_parser.required = True
mon_add = mon_parser.add_parser(
'add',
help=('R|Add a monitor to an existing cluster:\n'
'\tceph-deploy mon add node1\n'
'Or:\n'
'\tceph-deploy mon add --address 192.168.1.10 node1\n'
'If the section for the monitor exists and defines a `mon addr` that\n'
'will be used, otherwise it will fallback by resolving the hostname to an\n'
'IP. If `--address` is used it will override all other options.')
)
mon_add.add_argument(
'--address',
nargs='?',
)
mon_add.add_argument(
'mon',
nargs=1,
)
mon_create = mon_parser.add_parser(
'create',
help=('R|Deploy monitors by specifying them like:\n'
'\tceph-deploy mon create node1 node2 node3\n'
'If no hosts are passed it will default to use the\n'
'`mon initial members` defined in the configuration.')
)
mon_create.add_argument(
'--keyrings',
nargs='?',
help='concatenate multiple keyrings to be seeded on new monitors',
)
mon_create.add_argument(
'mon',
nargs='*',
)
mon_create_initial = mon_parser.add_parser(
'create-initial',
help=('Will deploy for monitors defined in `mon initial members`, '
'wait until they form quorum and then gatherkeys, reporting '
'the monitor status along the process. If monitors don\'t form '
'quorum the command will eventually time out.')
)
mon_create_initial.add_argument(
'--keyrings',
nargs='?',
help='concatenate multiple keyrings to be seeded on new monitors',
)
mon_destroy = mon_parser.add_parser(
'destroy',
help='Completely remove Ceph MON from remote host(s)'
)
mon_destroy.add_argument(
'mon',
nargs='+',
)
parser.set_defaults(
func=mon,
) | python | def make(parser):
"""
Ceph MON Daemon management
"""
parser.formatter_class = ToggleRawTextHelpFormatter
mon_parser = parser.add_subparsers(dest='subcommand')
mon_parser.required = True
mon_add = mon_parser.add_parser(
'add',
help=('R|Add a monitor to an existing cluster:\n'
'\tceph-deploy mon add node1\n'
'Or:\n'
'\tceph-deploy mon add --address 192.168.1.10 node1\n'
'If the section for the monitor exists and defines a `mon addr` that\n'
'will be used, otherwise it will fallback by resolving the hostname to an\n'
'IP. If `--address` is used it will override all other options.')
)
mon_add.add_argument(
'--address',
nargs='?',
)
mon_add.add_argument(
'mon',
nargs=1,
)
mon_create = mon_parser.add_parser(
'create',
help=('R|Deploy monitors by specifying them like:\n'
'\tceph-deploy mon create node1 node2 node3\n'
'If no hosts are passed it will default to use the\n'
'`mon initial members` defined in the configuration.')
)
mon_create.add_argument(
'--keyrings',
nargs='?',
help='concatenate multiple keyrings to be seeded on new monitors',
)
mon_create.add_argument(
'mon',
nargs='*',
)
mon_create_initial = mon_parser.add_parser(
'create-initial',
help=('Will deploy for monitors defined in `mon initial members`, '
'wait until they form quorum and then gatherkeys, reporting '
'the monitor status along the process. If monitors don\'t form '
'quorum the command will eventually time out.')
)
mon_create_initial.add_argument(
'--keyrings',
nargs='?',
help='concatenate multiple keyrings to be seeded on new monitors',
)
mon_destroy = mon_parser.add_parser(
'destroy',
help='Completely remove Ceph MON from remote host(s)'
)
mon_destroy.add_argument(
'mon',
nargs='+',
)
parser.set_defaults(
func=mon,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"parser",
".",
"formatter_class",
"=",
"ToggleRawTextHelpFormatter",
"mon_parser",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'subcommand'",
")",
"mon_parser",
".",
"required",
"=",
"True",
"mon_add",
"=",
... | Ceph MON Daemon management | [
"Ceph",
"MON",
"Daemon",
"management"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L476-L545 | train | 228,251 |
ceph/ceph-deploy | ceph_deploy/mon.py | get_mon_initial_members | def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
"""
Read the Ceph config file and return the value of mon_initial_members
Optionally, a NeedHostError can be raised if the value is None.
"""
if _cfg:
cfg = _cfg
else:
cfg = conf.ceph.load(args)
mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
if not mon_initial_members:
if error_on_empty:
raise exc.NeedHostError(
'could not find `mon initial members` defined in ceph.conf'
)
else:
mon_initial_members = re.split(r'[,\s]+', mon_initial_members)
return mon_initial_members | python | def get_mon_initial_members(args, error_on_empty=False, _cfg=None):
"""
Read the Ceph config file and return the value of mon_initial_members
Optionally, a NeedHostError can be raised if the value is None.
"""
if _cfg:
cfg = _cfg
else:
cfg = conf.ceph.load(args)
mon_initial_members = cfg.safe_get('global', 'mon_initial_members')
if not mon_initial_members:
if error_on_empty:
raise exc.NeedHostError(
'could not find `mon initial members` defined in ceph.conf'
)
else:
mon_initial_members = re.split(r'[,\s]+', mon_initial_members)
return mon_initial_members | [
"def",
"get_mon_initial_members",
"(",
"args",
",",
"error_on_empty",
"=",
"False",
",",
"_cfg",
"=",
"None",
")",
":",
"if",
"_cfg",
":",
"cfg",
"=",
"_cfg",
"else",
":",
"cfg",
"=",
"conf",
".",
"ceph",
".",
"load",
"(",
"args",
")",
"mon_initial_mem... | Read the Ceph config file and return the value of mon_initial_members
Optionally, a NeedHostError can be raised if the value is None. | [
"Read",
"the",
"Ceph",
"config",
"file",
"and",
"return",
"the",
"value",
"of",
"mon_initial_members",
"Optionally",
"a",
"NeedHostError",
"can",
"be",
"raised",
"if",
"the",
"value",
"is",
"None",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L552-L569 | train | 228,252 |
ceph/ceph-deploy | ceph_deploy/mon.py | is_running | def is_running(conn, args):
"""
Run a command to check the status of a mon, return a boolean.
We heavily depend on the format of the output, if that ever changes
we need to modify this.
Check daemon status for 3 times
output of the status should be similar to::
mon.mira094: running {"version":"0.61.5"}
or when it fails::
mon.mira094: dead {"version":"0.61.5"}
mon.mira094: not running {"version":"0.61.5"}
"""
stdout, stderr, _ = remoto.process.check(
conn,
args
)
result_string = b' '.join(stdout)
for run_check in [b': running', b' start/running']:
if run_check in result_string:
return True
return False | python | def is_running(conn, args):
"""
Run a command to check the status of a mon, return a boolean.
We heavily depend on the format of the output, if that ever changes
we need to modify this.
Check daemon status for 3 times
output of the status should be similar to::
mon.mira094: running {"version":"0.61.5"}
or when it fails::
mon.mira094: dead {"version":"0.61.5"}
mon.mira094: not running {"version":"0.61.5"}
"""
stdout, stderr, _ = remoto.process.check(
conn,
args
)
result_string = b' '.join(stdout)
for run_check in [b': running', b' start/running']:
if run_check in result_string:
return True
return False | [
"def",
"is_running",
"(",
"conn",
",",
"args",
")",
":",
"stdout",
",",
"stderr",
",",
"_",
"=",
"remoto",
".",
"process",
".",
"check",
"(",
"conn",
",",
"args",
")",
"result_string",
"=",
"b' '",
".",
"join",
"(",
"stdout",
")",
"for",
"run_check",... | Run a command to check the status of a mon, return a boolean.
We heavily depend on the format of the output, if that ever changes
we need to modify this.
Check daemon status for 3 times
output of the status should be similar to::
mon.mira094: running {"version":"0.61.5"}
or when it fails::
mon.mira094: dead {"version":"0.61.5"}
mon.mira094: not running {"version":"0.61.5"} | [
"Run",
"a",
"command",
"to",
"check",
"the",
"status",
"of",
"a",
"mon",
"return",
"a",
"boolean",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mon.py#L572-L596 | train | 228,253 |
ceph/ceph-deploy | ceph_deploy/util/system.py | executable_path | def executable_path(conn, executable):
"""
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
"""
executable_path = conn.remote_module.which(executable)
if not executable_path:
raise ExecutableNotFound(executable, conn.hostname)
return executable_path | python | def executable_path(conn, executable):
"""
Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found.
"""
executable_path = conn.remote_module.which(executable)
if not executable_path:
raise ExecutableNotFound(executable, conn.hostname)
return executable_path | [
"def",
"executable_path",
"(",
"conn",
",",
"executable",
")",
":",
"executable_path",
"=",
"conn",
".",
"remote_module",
".",
"which",
"(",
"executable",
")",
"if",
"not",
"executable_path",
":",
"raise",
"ExecutableNotFound",
"(",
"executable",
",",
"conn",
... | Remote validator that accepts a connection object to ensure that a certain
executable is available returning its full path if so.
Otherwise an exception with thorough details will be raised, informing the
user that the executable was not found. | [
"Remote",
"validator",
"that",
"accepts",
"a",
"connection",
"object",
"to",
"ensure",
"that",
"a",
"certain",
"executable",
"is",
"available",
"returning",
"its",
"full",
"path",
"if",
"so",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/system.py#L5-L16 | train | 228,254 |
ceph/ceph-deploy | ceph_deploy/util/system.py | is_systemd_service_enabled | def is_systemd_service_enabled(conn, service='ceph'):
"""
Detects if a systemd service is enabled or not.
"""
_, _, returncode = remoto.process.check(
conn,
[
'systemctl',
'is-enabled',
'--quiet',
'{service}'.format(service=service),
]
)
return returncode == 0 | python | def is_systemd_service_enabled(conn, service='ceph'):
"""
Detects if a systemd service is enabled or not.
"""
_, _, returncode = remoto.process.check(
conn,
[
'systemctl',
'is-enabled',
'--quiet',
'{service}'.format(service=service),
]
)
return returncode == 0 | [
"def",
"is_systemd_service_enabled",
"(",
"conn",
",",
"service",
"=",
"'ceph'",
")",
":",
"_",
",",
"_",
",",
"returncode",
"=",
"remoto",
".",
"process",
".",
"check",
"(",
"conn",
",",
"[",
"'systemctl'",
",",
"'is-enabled'",
",",
"'--quiet'",
",",
"'... | Detects if a systemd service is enabled or not. | [
"Detects",
"if",
"a",
"systemd",
"service",
"is",
"enabled",
"or",
"not",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/system.py#L167-L180 | train | 228,255 |
ceph/ceph-deploy | ceph_deploy/repo.py | make | def make(parser):
"""
Repo definition management
"""
parser.add_argument(
'repo_name',
metavar='REPO-NAME',
help='Name of repo to manage. Can match an entry in cephdeploy.conf'
)
parser.add_argument(
'--repo-url',
help='a repo URL that mirrors/contains Ceph packages'
)
parser.add_argument(
'--gpg-url',
help='a GPG key URL to be used with custom repos'
)
parser.add_argument(
'--remove', '--delete',
action='store_true',
help='remove repo definition on remote host'
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='host(s) to install on'
)
parser.set_defaults(
func=repo
) | python | def make(parser):
"""
Repo definition management
"""
parser.add_argument(
'repo_name',
metavar='REPO-NAME',
help='Name of repo to manage. Can match an entry in cephdeploy.conf'
)
parser.add_argument(
'--repo-url',
help='a repo URL that mirrors/contains Ceph packages'
)
parser.add_argument(
'--gpg-url',
help='a GPG key URL to be used with custom repos'
)
parser.add_argument(
'--remove', '--delete',
action='store_true',
help='remove repo definition on remote host'
)
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='host(s) to install on'
)
parser.set_defaults(
func=repo
) | [
"def",
"make",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'repo_name'",
",",
"metavar",
"=",
"'REPO-NAME'",
",",
"help",
"=",
"'Name of repo to manage. Can match an entry in cephdeploy.conf'",
")",
"parser",
".",
"add_argument",
"(",
"'--repo-url'"... | Repo definition management | [
"Repo",
"definition",
"management"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/repo.py#L77-L113 | train | 228,256 |
ceph/ceph-deploy | ceph_deploy/conf/cephdeploy.py | Conf.get_list | def get_list(self, section, key):
"""
Assumes that the value for a given key is going to be a list
separated by commas. It gets rid of trailing comments.
If just one item is present it returns a list with a single item, if no
key is found an empty list is returned.
"""
value = self.get_safe(section, key, [])
if value == []:
return value
# strip comments
value = re.split(r'\s+#', value)[0]
# split on commas
value = value.split(',')
# strip spaces
return [x.strip() for x in value] | python | def get_list(self, section, key):
"""
Assumes that the value for a given key is going to be a list
separated by commas. It gets rid of trailing comments.
If just one item is present it returns a list with a single item, if no
key is found an empty list is returned.
"""
value = self.get_safe(section, key, [])
if value == []:
return value
# strip comments
value = re.split(r'\s+#', value)[0]
# split on commas
value = value.split(',')
# strip spaces
return [x.strip() for x in value] | [
"def",
"get_list",
"(",
"self",
",",
"section",
",",
"key",
")",
":",
"value",
"=",
"self",
".",
"get_safe",
"(",
"section",
",",
"key",
",",
"[",
"]",
")",
"if",
"value",
"==",
"[",
"]",
":",
"return",
"value",
"# strip comments",
"value",
"=",
"r... | Assumes that the value for a given key is going to be a list
separated by commas. It gets rid of trailing comments.
If just one item is present it returns a list with a single item, if no
key is found an empty list is returned. | [
"Assumes",
"that",
"the",
"value",
"for",
"a",
"given",
"key",
"is",
"going",
"to",
"be",
"a",
"list",
"separated",
"by",
"commas",
".",
"It",
"gets",
"rid",
"of",
"trailing",
"comments",
".",
"If",
"just",
"one",
"item",
"is",
"present",
"it",
"return... | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/conf/cephdeploy.py#L189-L207 | train | 228,257 |
ceph/ceph-deploy | ceph_deploy/conf/cephdeploy.py | Conf.get_default_repo | def get_default_repo(self):
"""
Go through all the repositories defined in the config file and search
for a truthy value for the ``default`` key. If there isn't any return
None.
"""
for repo in self.get_repos():
if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):
return repo
return False | python | def get_default_repo(self):
"""
Go through all the repositories defined in the config file and search
for a truthy value for the ``default`` key. If there isn't any return
None.
"""
for repo in self.get_repos():
if self.get_safe(repo, 'default') and self.getboolean(repo, 'default'):
return repo
return False | [
"def",
"get_default_repo",
"(",
"self",
")",
":",
"for",
"repo",
"in",
"self",
".",
"get_repos",
"(",
")",
":",
"if",
"self",
".",
"get_safe",
"(",
"repo",
",",
"'default'",
")",
"and",
"self",
".",
"getboolean",
"(",
"repo",
",",
"'default'",
")",
"... | Go through all the repositories defined in the config file and search
for a truthy value for the ``default`` key. If there isn't any return
None. | [
"Go",
"through",
"all",
"the",
"repositories",
"defined",
"in",
"the",
"config",
"file",
"and",
"search",
"for",
"a",
"truthy",
"value",
"for",
"the",
"default",
"key",
".",
"If",
"there",
"isn",
"t",
"any",
"return",
"None",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/conf/cephdeploy.py#L209-L218 | train | 228,258 |
def validate_host_ip(ips, subnets):
    """
    Ensure a host has at least one IP inside every subnet specified.

    ``None`` entries in ``subnets`` are pruned before checking.  Raises
    ``RuntimeError`` as soon as a subnet is found for which none of the
    ``ips`` qualifies; returns ``None`` when all subnets are satisfied.
    """
    wanted = [subnet for subnet in subnets if subnet is not None]
    for subnet in wanted:
        if not any(net.ip_in_subnet(ip, subnet) for ip in ips):
            msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
            raise RuntimeError(msg)
"""
Make sure that a given host all subnets specified will have at least one IP
in that range.
"""
# Make sure we prune ``None`` arguments
subnets = [s for s in subnets if s is not None]
validate_one_subnet = len(subnets) == 1
def ip_in_one_subnet(ips, subnet):
""" ensure an ip exists in at least one subnet """
for ip in ips:
if net.ip_in_subnet(ip, subnet):
return True
return False
for subnet in subnets:
if ip_in_one_subnet(ips, subnet):
if validate_one_subnet:
return
else: # keep going to make sure the other subnets are ok
continue
else:
msg = "subnet (%s) is not valid for any of the ips found %s" % (subnet, str(ips))
raise RuntimeError(msg) | [
"def",
"validate_host_ip",
"(",
"ips",
",",
"subnets",
")",
":",
"# Make sure we prune ``None`` arguments",
"subnets",
"=",
"[",
"s",
"for",
"s",
"in",
"subnets",
"if",
"s",
"is",
"not",
"None",
"]",
"validate_one_subnet",
"=",
"len",
"(",
"subnets",
")",
"=... | Make sure that a given host all subnets specified will have at least one IP
in that range. | [
"Make",
"sure",
"that",
"a",
"given",
"host",
"all",
"subnets",
"specified",
"will",
"have",
"at",
"least",
"one",
"IP",
"in",
"that",
"range",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/new.py#L78-L102 | train | 228,259 |
def get_public_network_ip(ips, public_subnet):
    """
    Given a public subnet, return the first IP from ``ips`` that falls
    inside that subnet range.

    :raises RuntimeError: when none of the candidate IPs qualifies.
    """
    for candidate in ips:
        if net.ip_in_subnet(candidate, public_subnet):
            return candidate
    msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
    raise RuntimeError(msg)
"""
Given a public subnet, chose the one IP from the remote host that exists
within the subnet range.
"""
for ip in ips:
if net.ip_in_subnet(ip, public_subnet):
return ip
msg = "IPs (%s) are not valid for any of subnet specified %s" % (str(ips), str(public_subnet))
raise RuntimeError(msg) | [
"def",
"get_public_network_ip",
"(",
"ips",
",",
"public_subnet",
")",
":",
"for",
"ip",
"in",
"ips",
":",
"if",
"net",
".",
"ip_in_subnet",
"(",
"ip",
",",
"public_subnet",
")",
":",
"return",
"ip",
"msg",
"=",
"\"IPs (%s) are not valid for any of subnet specif... | Given a public subnet, chose the one IP from the remote host that exists
within the subnet range. | [
"Given",
"a",
"public",
"subnet",
"chose",
"the",
"one",
"IP",
"from",
"the",
"remote",
"host",
"that",
"exists",
"within",
"the",
"subnet",
"range",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/new.py#L105-L114 | train | 228,260 |
def make(parser):
    """
    Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
    """
    # At least one initial monitor is required; each entry may be a bare
    # hostname/fqdn or a 'hostname:fqdn' pair (validated by Hostname()).
    parser.add_argument(
        'mon',
        metavar='MON',
        nargs='+',
        help='initial monitor hostname, fqdn, or hostname:fqdn pair',
        type=arg_validators.Hostname(),
    )
    # SSH key copying is opt-out: the flag stores False into args.ssh_copykey.
    parser.add_argument(
        '--no-ssh-copykey',
        dest='ssh_copykey',
        action='store_false',
        default=True,
        help='do not attempt to copy SSH keys',
    )
    parser.add_argument(
        '--fsid',
        dest='fsid',
        help='provide an alternate FSID for ceph.conf generation',
    )
    # Subnet() validates CIDR-style input before it reaches ceph.conf writing.
    parser.add_argument(
        '--cluster-network',
        help='specify the (internal) cluster network',
        type=arg_validators.Subnet(),
    )
    parser.add_argument(
        '--public-network',
        help='specify the public network for a cluster',
        type=arg_validators.Subnet(),
    )
    # The CLI framework dispatches to new(args) for this subcommand.
    parser.set_defaults(
        func=new,
    )
"""
Start deploying a new cluster, and write a CLUSTER.conf and keyring for it.
"""
parser.add_argument(
'mon',
metavar='MON',
nargs='+',
help='initial monitor hostname, fqdn, or hostname:fqdn pair',
type=arg_validators.Hostname(),
)
parser.add_argument(
'--no-ssh-copykey',
dest='ssh_copykey',
action='store_false',
default=True,
help='do not attempt to copy SSH keys',
)
parser.add_argument(
'--fsid',
dest='fsid',
help='provide an alternate FSID for ceph.conf generation',
)
parser.add_argument(
'--cluster-network',
help='specify the (internal) cluster network',
type=arg_validators.Subnet(),
)
parser.add_argument(
'--public-network',
help='specify the public network for a cluster',
type=arg_validators.Subnet(),
)
parser.set_defaults(
func=new,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'mon'",
",",
"metavar",
"=",
"'MON'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'initial monitor hostname, fqdn, or hostname:fqdn pair'",
",",
"type",
"=",
"arg_validators",
".",
"... | Start deploying a new cluster, and write a CLUSTER.conf and keyring for it. | [
"Start",
"deploying",
"a",
"new",
"cluster",
"and",
"write",
"a",
"CLUSTER",
".",
"conf",
"and",
"keyring",
"for",
"it",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/new.py#L237-L276 | train | 228,261 |
def make(parser):
    """
    Ceph MDS daemon management
    """
    mds_parser = parser.add_subparsers(dest='subcommand')
    # Python 3 argparse no longer requires a subcommand by default; force it
    # so 'mds' without 'create' errors out instead of silently doing nothing.
    mds_parser.required = True
    mds_create = mds_parser.add_parser(
        'create',
        help='Deploy Ceph MDS on remote host(s)'
    )
    # Each target is HOST or HOST:NAME; colon_separated splits the string
    # into its host / daemon-name parts.
    mds_create.add_argument(
        'mds',
        metavar='HOST[:NAME]',
        nargs='+',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    # The CLI framework dispatches to mds(args) for this subcommand.
    parser.set_defaults(
        func=mds,
    )
"""
Ceph MDS daemon management
"""
mds_parser = parser.add_subparsers(dest='subcommand')
mds_parser.required = True
mds_create = mds_parser.add_parser(
'create',
help='Deploy Ceph MDS on remote host(s)'
)
mds_create.add_argument(
'mds',
metavar='HOST[:NAME]',
nargs='+',
type=colon_separated,
help='host (and optionally the daemon name) to deploy on',
)
parser.set_defaults(
func=mds,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"mds_parser",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'subcommand'",
")",
"mds_parser",
".",
"required",
"=",
"True",
"mds_create",
"=",
"mds_parser",
".",
"add_parser",
"(",
"'create'",
",",
"help",
... | Ceph MDS daemon management | [
"Ceph",
"MDS",
"daemon",
"management"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mds.py#L206-L226 | train | 228,262 |
def install_yum_priorities(distro, _yum=None):
    """
    Install the Yum priorities plugin so our ceph.repo outranks EPEL.

    EPEL also packages Ceph, so the repo file we create must have a higher
    priority than the EPEL repo; that requires the priorities plugin.  Per
    the CentOS wiki the package name has changed back and forth: it is
    ``yum-priorities`` on CentOS-5 but ``yum-plugin-priorities`` on
    CentOS-4, and CentOS-6 reverted to ``yum-plugin-priorities``.

    :params _yum: Used for testing, so we can inject a fake yum
    """
    installer = _yum or pkg_managers.yum
    # Default to the modern name; only CentOS releases other than the 6.x
    # series get the older CentOS-5 era name.
    package = 'yum-plugin-priorities'
    if distro.normalized_name == 'centos' and distro.release[0] != '6':
        package = 'yum-priorities'
    installer(distro.conn, package)
"""
EPEL started packaging Ceph so we need to make sure that the ceph.repo we
install has a higher priority than the EPEL repo so that when installing
Ceph it will come from the repo file we create.
The name of the package changed back and forth (!) since CentOS 4:
From the CentOS wiki::
Note: This plugin has carried at least two differing names over time.
It is named yum-priorities on CentOS-5 but was named
yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to
yum-plugin-priorities.
:params _yum: Used for testing, so we can inject a fake yum
"""
yum = _yum or pkg_managers.yum
package_name = 'yum-plugin-priorities'
if distro.normalized_name == 'centos':
if distro.release[0] != '6':
package_name = 'yum-priorities'
yum(distro.conn, package_name) | [
"def",
"install_yum_priorities",
"(",
"distro",
",",
"_yum",
"=",
"None",
")",
":",
"yum",
"=",
"_yum",
"or",
"pkg_managers",
".",
"yum",
"package_name",
"=",
"'yum-plugin-priorities'",
"if",
"distro",
".",
"normalized_name",
"==",
"'centos'",
":",
"if",
"dist... | EPEL started packaging Ceph so we need to make sure that the ceph.repo we
install has a higher priority than the EPEL repo so that when installing
Ceph it will come from the repo file we create.
The name of the package changed back and forth (!) since CentOS 4:
From the CentOS wiki::
Note: This plugin has carried at least two differing names over time.
It is named yum-priorities on CentOS-5 but was named
yum-plugin-priorities on CentOS-4. CentOS-6 has reverted to
yum-plugin-priorities.
:params _yum: Used for testing, so we can inject a fake yum | [
"EPEL",
"started",
"packaging",
"Ceph",
"so",
"we",
"need",
"to",
"make",
"sure",
"that",
"the",
"ceph",
".",
"repo",
"we",
"install",
"has",
"a",
"higher",
"priority",
"than",
"the",
"EPEL",
"repo",
"so",
"that",
"when",
"installing",
"Ceph",
"it",
"wil... | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/util.py#L8-L31 | train | 228,263 |
def make_exception_message(exc):
    """
    Render an exception as one readable, newline-terminated message:
    the class name, plus the exception text when there is any.
    """
    name = exc.__class__.__name__
    detail = str(exc)
    if detail:
        return '%s: %s\n' % (name, detail)
    return '%s\n' % (name)
"""
An exception is passed in and this function
returns the proper string depending on the result
so it is readable enough.
"""
if str(exc):
return '%s: %s\n' % (exc.__class__.__name__, exc)
else:
return '%s\n' % (exc.__class__.__name__) | [
"def",
"make_exception_message",
"(",
"exc",
")",
":",
"if",
"str",
"(",
"exc",
")",
":",
"return",
"'%s: %s\\n'",
"%",
"(",
"exc",
".",
"__class__",
".",
"__name__",
",",
"exc",
")",
"else",
":",
"return",
"'%s\\n'",
"%",
"(",
"exc",
".",
"__class__",... | An exception is passed in and this function
returns the proper string depending on the result
so it is readable enough. | [
"An",
"exception",
"is",
"passed",
"in",
"and",
"this",
"function",
"returns",
"the",
"proper",
"string",
"depending",
"on",
"the",
"result",
"so",
"it",
"is",
"readable",
"enough",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/decorators.py#L102-L111 | train | 228,264 |
def platform_information(_linux_distribution=None):
    """detect platform information from remote host"""
    probe = _linux_distribution or platform.linux_distribution
    distro, release, codename = probe()
    if not distro:
        # Some distros lack the lsb data platform relies on; fall back to
        # parsing /etc/os-release directly.
        distro, release, codename = parse_os_release()
    lowered = distro.lower()
    if not codename and 'debian' in lowered:
        # Debian frequently reports an empty codename; derive it from the
        # major version number instead.
        known = {
            '10': 'buster',
            '9': 'stretch',
            '8': 'jessie',
            '7': 'wheezy',
            '6': 'squeeze',
        }
        codename = known.get(release.split('.')[0], '')
        # Support 'jessie/sid' or 'wheezy/sid' style release strings: when
        # sid is the minor part use sid, otherwise keep the major part.
        if not codename and '/' in release:
            major, minor = release.split('/')
            codename = minor if minor == 'sid' else major
    if not codename and 'oracle' in lowered:
        # Oracle Linux reports an empty codename
        codename = 'oracle'
    if not codename and 'virtuozzo linux' in lowered:
        # Virtuozzo Linux reports an empty codename
        codename = 'virtuozzo'
    if not codename and 'arch' in lowered:
        # Arch Linux reports an empty codename
        codename = 'arch'
    return (
        str(distro).rstrip(),
        str(release).rstrip(),
        str(codename).rstrip()
    )
""" detect platform information from remote host """
linux_distribution = _linux_distribution or platform.linux_distribution
distro, release, codename = linux_distribution()
if not distro:
distro, release, codename = parse_os_release()
if not codename and 'debian' in distro.lower(): # this could be an empty string in Debian
debian_codenames = {
'10': 'buster',
'9': 'stretch',
'8': 'jessie',
'7': 'wheezy',
'6': 'squeeze',
}
major_version = release.split('.')[0]
codename = debian_codenames.get(major_version, '')
# In order to support newer jessie/sid or wheezy/sid strings we test this
# if sid is buried in the minor, we should use sid anyway.
if not codename and '/' in release:
major, minor = release.split('/')
if minor == 'sid':
codename = minor
else:
codename = major
if not codename and 'oracle' in distro.lower(): # this could be an empty string in Oracle linux
codename = 'oracle'
if not codename and 'virtuozzo linux' in distro.lower(): # this could be an empty string in Virtuozzo linux
codename = 'virtuozzo'
if not codename and 'arch' in distro.lower(): # this could be an empty string in Arch linux
codename = 'arch'
return (
str(distro).rstrip(),
str(release).rstrip(),
str(codename).rstrip()
) | [
"def",
"platform_information",
"(",
"_linux_distribution",
"=",
"None",
")",
":",
"linux_distribution",
"=",
"_linux_distribution",
"or",
"platform",
".",
"linux_distribution",
"distro",
",",
"release",
",",
"codename",
"=",
"linux_distribution",
"(",
")",
"if",
"no... | detect platform information from remote host | [
"detect",
"platform",
"information",
"from",
"remote",
"host"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L14-L50 | train | 228,265 |
def write_keyring(path, key, uid=-1, gid=-1):
    """create a keyring file"""
    # Ensure the destination directory exists before anything else.
    parent = os.path.dirname(path)
    # Stage the key in a named temp file, then move it into place.
    # delete=False is required: the temp file must outlive close() so
    # shutil.move can relocate it (possibly across filesystems).
    staging = tempfile.NamedTemporaryFile('wb', delete=False)
    staging.write(key)
    staging.close()
    if not path_exists(parent):
        makedir(parent, uid, gid)
    shutil.move(staging.name, path)
""" create a keyring file """
# Note that we *require* to avoid deletion of the temp file
# otherwise we risk not being able to copy the contents from
# one file system to the other, hence the `delete=False`
tmp_file = tempfile.NamedTemporaryFile('wb', delete=False)
tmp_file.write(key)
tmp_file.close()
keyring_dir = os.path.dirname(path)
if not path_exists(keyring_dir):
makedir(keyring_dir, uid, gid)
shutil.move(tmp_file.name, path) | [
"def",
"write_keyring",
"(",
"path",
",",
"key",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"# Note that we *require* to avoid deletion of the temp file",
"# otherwise we risk not being able to copy the contents from",
"# one file system to the other, hen... | create a keyring file | [
"create",
"a",
"keyring",
"file"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L178-L189 | train | 228,266 |
def create_mon_path(path, uid=-1, gid=-1):
    """create the mon path if it does not exist"""
    if os.path.exists(path):
        return
    os.makedirs(path)
    # Hand the freshly created directory to the requested owner
    # (-1 leaves the respective uid/gid unchanged).
    os.chown(path, uid, gid)
"""create the mon path if it does not exist"""
if not os.path.exists(path):
os.makedirs(path)
os.chown(path, uid, gid); | [
"def",
"create_mon_path",
"(",
"path",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"os",
".",
"chown",
"(",
"pa... | create the mon path if it does not exist | [
"create",
"the",
"mon",
"path",
"if",
"it",
"does",
"not",
"exist"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L192-L196 | train | 228,267 |
def create_done_path(done_path, uid=-1, gid=-1):
    """create a done file to avoid re-doing the mon deployment"""
    # Touch an empty marker file, then hand it to the requested owner
    # (-1 leaves the respective uid/gid unchanged).
    open(done_path, 'wb').close()
    os.chown(done_path, uid, gid)
"""create a done file to avoid re-doing the mon deployment"""
with open(done_path, 'wb'):
pass
os.chown(done_path, uid, gid); | [
"def",
"create_done_path",
"(",
"done_path",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"with",
"open",
"(",
"done_path",
",",
"'wb'",
")",
":",
"pass",
"os",
".",
"chown",
"(",
"done_path",
",",
"uid",
",",
"gid",
")"
] | create a done file to avoid re-doing the mon deployment | [
"create",
"a",
"done",
"file",
"to",
"avoid",
"re",
"-",
"doing",
"the",
"mon",
"deployment"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L199-L203 | train | 228,268 |
def create_init_path(init_path, uid=-1, gid=-1):
    """create the init path if it does not exist"""
    if os.path.exists(init_path):
        return
    # Touch the empty marker and give it to the requested owner
    # (-1 leaves the respective uid/gid unchanged).
    open(init_path, 'wb').close()
    os.chown(init_path, uid, gid)
"""create the init path if it does not exist"""
if not os.path.exists(init_path):
with open(init_path, 'wb'):
pass
os.chown(init_path, uid, gid); | [
"def",
"create_init_path",
"(",
"init_path",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"init_path",
")",
":",
"with",
"open",
"(",
"init_path",
",",
"'wb'",
")",
":",
"pass... | create the init path if it does not exist | [
"create",
"the",
"init",
"path",
"if",
"it",
"does",
"not",
"exist"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L206-L211 | train | 228,269 |
def write_monitor_keyring(keyring, monitor_keyring, uid=-1, gid=-1):
    """create the monitor keyring file"""
    # Delegate to the write_file helper: mode 0600 keeps the secret readable
    # by the owner only; uid/gid of -1 leave ownership unchanged.
    # NOTE(review): the None positional argument maps to write_file's fourth
    # parameter -- confirm its meaning against write_file's definition.
    write_file(keyring, monitor_keyring, 0o600, None, uid, gid)
"""create the monitor keyring file"""
write_file(keyring, monitor_keyring, 0o600, None, uid, gid) | [
"def",
"write_monitor_keyring",
"(",
"keyring",
",",
"monitor_keyring",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"write_file",
"(",
"keyring",
",",
"monitor_keyring",
",",
"0o600",
",",
"None",
",",
"uid",
",",
"gid",
")"
] | create the monitor keyring file | [
"create",
"the",
"monitor",
"keyring",
"file"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L260-L262 | train | 228,270 |
def which(executable):
    """find the location of an executable"""
    # Search a fixed list of conventional directories rather than $PATH,
    # since the remote session may not have a usable PATH of its own.
    search_dirs = (
        '/usr/local/bin',
        '/bin',
        '/usr/bin',
        '/usr/local/sbin',
        '/usr/sbin',
        '/sbin',
    )
    for directory in search_dirs:
        candidate = os.path.join(directory, executable)
        # isfile() implies existence and excludes directories, matching the
        # original exists()-and-isfile() pair.
        if os.path.isfile(candidate):
            return candidate
"""find the location of an executable"""
locations = (
'/usr/local/bin',
'/bin',
'/usr/bin',
'/usr/local/sbin',
'/usr/sbin',
'/sbin',
)
for location in locations:
executable_path = os.path.join(location, executable)
if os.path.exists(executable_path) and os.path.isfile(executable_path):
return executable_path | [
"def",
"which",
"(",
"executable",
")",
":",
"locations",
"=",
"(",
"'/usr/local/bin'",
",",
"'/bin'",
",",
"'/usr/bin'",
",",
"'/usr/local/sbin'",
",",
"'/usr/sbin'",
",",
"'/sbin'",
",",
")",
"for",
"location",
"in",
"locations",
":",
"executable_path",
"=",... | find the location of an executable | [
"find",
"the",
"location",
"of",
"an",
"executable"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L331-L345 | train | 228,271 |
def make_mon_removed_dir(path, file_name):
    """ move old monitor data """
    # Create the graveyard directory, tolerating a pre-existing one.
    try:
        os.makedirs('/var/lib/ceph/mon-removed')
    except OSError as error:
        if error.errno != errno.EEXIST:
            raise
    destination = os.path.join('/var/lib/ceph/mon-removed/', file_name)
    shutil.move(path, destination)
""" move old monitor data """
try:
os.makedirs('/var/lib/ceph/mon-removed')
except OSError as e:
if e.errno != errno.EEXIST:
raise
shutil.move(path, os.path.join('/var/lib/ceph/mon-removed/', file_name)) | [
"def",
"make_mon_removed_dir",
"(",
"path",
",",
"file_name",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"'/var/lib/ceph/mon-removed'",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"errno",
".",
"EEXIST",
":",
"raise",
... | move old monitor data | [
"move",
"old",
"monitor",
"data"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L348-L355 | train | 228,272 |
def safe_mkdir(path, uid=-1, gid=-1):
    """ create path if it doesn't exist """
    try:
        os.mkdir(path)
    except OSError as error:
        # An already-existing directory is fine; anything else is fatal.
        if error.errno != errno.EEXIST:
            raise
    else:
        # Only a freshly created directory gets re-owned
        # (-1 leaves the respective uid/gid unchanged).
        os.chown(path, uid, gid)
""" create path if it doesn't exist """
try:
os.mkdir(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
else:
os.chown(path, uid, gid) | [
"def",
"safe_mkdir",
"(",
"path",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"try",
":",
"os",
".",
"mkdir",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
":... | create path if it doesn't exist | [
"create",
"path",
"if",
"it",
"doesn",
"t",
"exist"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L358-L368 | train | 228,273 |
def safe_makedirs(path, uid=-1, gid=-1):
    """ create path recursively if it doesn't exist """
    try:
        os.makedirs(path)
    except OSError as error:
        # An already-existing path is fine; anything else is fatal.
        if error.errno != errno.EEXIST:
            raise
    else:
        # Only a freshly created tree gets its leaf re-owned
        # (-1 leaves the respective uid/gid unchanged).
        os.chown(path, uid, gid)
""" create path recursively if it doesn't exist """
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
else:
os.chown(path, uid, gid) | [
"def",
"safe_makedirs",
"(",
"path",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST"... | create path recursively if it doesn't exist | [
"create",
"path",
"recursively",
"if",
"it",
"doesn",
"t",
"exist"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L371-L381 | train | 228,274 |
def zeroing(dev):
    """ zeroing last few blocks of device """
    # this kills the crab
    #
    # sgdisk will wipe out the main copy of the GPT partition
    # table (sorry), but it doesn't remove the backup copies, and
    # subsequent commands will continue to complain and fail when
    # they see those.  zeroing the last few blocks of the device
    # appears to do the trick.
    lba_size = 4096
    size = 33 * lba_size
    # NOTE(review): this early return makes everything below unreachable --
    # the function never actually writes to the device. If that is a
    # deliberate kill-switch it should say so; otherwise remove the return.
    return True
    with open(dev, 'wb') as f:
        f.seek(-size, os.SEEK_END)
        f.write(size*b'\0')
""" zeroing last few blocks of device """
# this kills the crab
#
# sgdisk will wipe out the main copy of the GPT partition
# table (sorry), but it doesn't remove the backup copies, and
# subsequent commands will continue to complain and fail when
# they see those. zeroing the last few blocks of the device
# appears to do the trick.
lba_size = 4096
size = 33 * lba_size
return True
with open(dev, 'wb') as f:
f.seek(-size, os.SEEK_END)
f.write(size*b'\0') | [
"def",
"zeroing",
"(",
"dev",
")",
":",
"# this kills the crab",
"#",
"# sgdisk will wipe out the main copy of the GPT partition",
"# table (sorry), but it doesn't remove the backup copies, and",
"# subsequent commands will continue to complain and fail when",
"# they see those. zeroing the l... | zeroing last few blocks of device | [
"zeroing",
"last",
"few",
"blocks",
"of",
"device"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L384-L398 | train | 228,275 |
def enable_yum_priority_obsoletes(path="/etc/yum/pluginconf.d/priorities.conf"):
    """Configure Yum priorities to include obsoletes"""
    # Read the existing plugin config, flip check_obsoletes on in the
    # [main] section, and write the whole file back out.
    parser = configparser.ConfigParser()
    parser.read(path)
    parser.set('main', 'check_obsoletes', '1')
    with open(path, 'w') as conf_file:
        parser.write(conf_file)
"""Configure Yum priorities to include obsoletes"""
config = configparser.ConfigParser()
config.read(path)
config.set('main', 'check_obsoletes', '1')
with open(path, 'w') as fout:
config.write(fout) | [
"def",
"enable_yum_priority_obsoletes",
"(",
"path",
"=",
"\"/etc/yum/pluginconf.d/priorities.conf\"",
")",
":",
"config",
"=",
"configparser",
".",
"ConfigParser",
"(",
")",
"config",
".",
"read",
"(",
"path",
")",
"config",
".",
"set",
"(",
"'main'",
",",
"'ch... | Configure Yum priorities to include obsoletes | [
"Configure",
"Yum",
"priorities",
"to",
"include",
"obsoletes"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L401-L407 | train | 228,276 |
def vendorize(vendor_requirements):
    """
    This is the main entry point for vendorizing requirements. It expects
    a list of tuples that should contain the name of the library and the
    version.

    For example, a library ``foo`` with version ``0.0.1`` would look like::

        vendor_requirements = [
            ('foo', '0.0.1'),
        ]

    A three-item tuple may additionally carry a command that needs to run
    for that library: ``('foo', '0.0.1', 'some command')``.

    :raises ValueError: if an entry is not a 2- or 3-item tuple.  (The
        previous code silently reused ``name``/``version``/``cmd`` from the
        prior loop iteration for malformed entries, or crashed with a
        ``NameError`` when the first entry was malformed.)
    """
    for library in vendor_requirements:
        if len(library) == 2:
            name, version = library
            cmd = None
        elif len(library) == 3:  # a possible cmd we need to run
            name, version, cmd = library
        else:
            raise ValueError(
                'vendor requirement entries must have 2 or 3 items, '
                'got %r' % (library,)
            )
        vendor_library(name, version, cmd)
"""
This is the main entry point for vendorizing requirements. It expects
a list of tuples that should contain the name of the library and the
version.
For example, a library ``foo`` with version ``0.0.1`` would look like::
vendor_requirements = [
('foo', '0.0.1'),
]
"""
for library in vendor_requirements:
if len(library) == 2:
name, version = library
cmd = None
elif len(library) == 3: # a possible cmd we need to run
name, version, cmd = library
vendor_library(name, version, cmd) | [
"def",
"vendorize",
"(",
"vendor_requirements",
")",
":",
"for",
"library",
"in",
"vendor_requirements",
":",
"if",
"len",
"(",
"library",
")",
"==",
"2",
":",
"name",
",",
"version",
"=",
"library",
"cmd",
"=",
"None",
"elif",
"len",
"(",
"library",
")"... | This is the main entry point for vendorizing requirements. It expects
a list of tuples that should contain the name of the library and the
version.
For example, a library ``foo`` with version ``0.0.1`` would look like::
vendor_requirements = [
('foo', '0.0.1'),
] | [
"This",
"is",
"the",
"main",
"entry",
"point",
"for",
"vendorizing",
"requirements",
".",
"It",
"expects",
"a",
"list",
"of",
"tuples",
"that",
"should",
"contain",
"the",
"name",
"of",
"the",
"library",
"and",
"the",
"version",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/vendor.py#L93-L112 | train | 228,277 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | _keyring_equivalent | def _keyring_equivalent(keyring_one, keyring_two):
"""
Check two keyrings are identical
"""
def keyring_extract_key(file_path):
"""
Cephx keyring files may or may not have white space before some lines.
They may have some values in quotes, so a safe way to compare is to
extract the key.
"""
with open(file_path) as f:
for line in f:
content = line.strip()
if len(content) == 0:
continue
split_line = content.split('=')
if split_line[0].strip() == 'key':
return "=".join(split_line[1:]).strip()
raise RuntimeError("File '%s' is not a keyring" % file_path)
key_one = keyring_extract_key(keyring_one)
key_two = keyring_extract_key(keyring_two)
return key_one == key_two | python | def _keyring_equivalent(keyring_one, keyring_two):
"""
Check two keyrings are identical
"""
def keyring_extract_key(file_path):
"""
Cephx keyring files may or may not have white space before some lines.
They may have some values in quotes, so a safe way to compare is to
extract the key.
"""
with open(file_path) as f:
for line in f:
content = line.strip()
if len(content) == 0:
continue
split_line = content.split('=')
if split_line[0].strip() == 'key':
return "=".join(split_line[1:]).strip()
raise RuntimeError("File '%s' is not a keyring" % file_path)
key_one = keyring_extract_key(keyring_one)
key_two = keyring_extract_key(keyring_two)
return key_one == key_two | [
"def",
"_keyring_equivalent",
"(",
"keyring_one",
",",
"keyring_two",
")",
":",
"def",
"keyring_extract_key",
"(",
"file_path",
")",
":",
"\"\"\"\n Cephx keyring files may or may not have white space before some lines.\n They may have some values in quotes, so a safe way to... | Check two keyrings are identical | [
"Check",
"two",
"keyrings",
"are",
"identical"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L17-L38 | train | 228,278 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | keytype_path_to | def keytype_path_to(args, keytype):
"""
Get the local filename for a keyring type
"""
if keytype == "admin":
return '{cluster}.client.admin.keyring'.format(
cluster=args.cluster)
if keytype == "mon":
return '{cluster}.mon.keyring'.format(
cluster=args.cluster)
return '{cluster}.bootstrap-{what}.keyring'.format(
cluster=args.cluster,
what=keytype) | python | def keytype_path_to(args, keytype):
"""
Get the local filename for a keyring type
"""
if keytype == "admin":
return '{cluster}.client.admin.keyring'.format(
cluster=args.cluster)
if keytype == "mon":
return '{cluster}.mon.keyring'.format(
cluster=args.cluster)
return '{cluster}.bootstrap-{what}.keyring'.format(
cluster=args.cluster,
what=keytype) | [
"def",
"keytype_path_to",
"(",
"args",
",",
"keytype",
")",
":",
"if",
"keytype",
"==",
"\"admin\"",
":",
"return",
"'{cluster}.client.admin.keyring'",
".",
"format",
"(",
"cluster",
"=",
"args",
".",
"cluster",
")",
"if",
"keytype",
"==",
"\"mon\"",
":",
"r... | Get the local filename for a keyring type | [
"Get",
"the",
"local",
"filename",
"for",
"a",
"keyring",
"type"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L41-L53 | train | 228,279 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | gatherkeys_missing | def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
"""
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
"""
args_prefix = [
'/usr/bin/ceph',
'--connect-timeout=25',
'--cluster={cluster}'.format(
cluster=args.cluster),
'--name', 'mon.',
'--keyring={keypath}'.format(
keypath=keypath),
]
identity = keytype_identity(keytype)
if identity is None:
raise RuntimeError('Could not find identity for keytype:%s' % keytype)
capabilites = keytype_capabilities(keytype)
if capabilites is None:
raise RuntimeError('Could not find capabilites for keytype:%s' % keytype)
# First try getting the key if it already exists, to handle the case where
# it exists but doesn't match the caps we would pass into get-or-create.
# This is the same behvaior as in newer ceph-create-keys
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get', identity]
)
if code == errno.ENOENT:
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get-or-create', identity] + capabilites
)
if code != 0:
rlogger.error(
'"ceph auth get-or-create for keytype %s returned %s',
keytype, code
)
for line in err:
rlogger.debug(line)
return False
keyring_name_local = keytype_path_to(args, keytype)
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'wb') as f:
for line in out:
f.write(line + b'\n')
return True | python | def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
"""
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
"""
args_prefix = [
'/usr/bin/ceph',
'--connect-timeout=25',
'--cluster={cluster}'.format(
cluster=args.cluster),
'--name', 'mon.',
'--keyring={keypath}'.format(
keypath=keypath),
]
identity = keytype_identity(keytype)
if identity is None:
raise RuntimeError('Could not find identity for keytype:%s' % keytype)
capabilites = keytype_capabilities(keytype)
if capabilites is None:
raise RuntimeError('Could not find capabilites for keytype:%s' % keytype)
# First try getting the key if it already exists, to handle the case where
# it exists but doesn't match the caps we would pass into get-or-create.
# This is the same behvaior as in newer ceph-create-keys
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get', identity]
)
if code == errno.ENOENT:
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get-or-create', identity] + capabilites
)
if code != 0:
rlogger.error(
'"ceph auth get-or-create for keytype %s returned %s',
keytype, code
)
for line in err:
rlogger.debug(line)
return False
keyring_name_local = keytype_path_to(args, keytype)
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'wb') as f:
for line in out:
f.write(line + b'\n')
return True | [
"def",
"gatherkeys_missing",
"(",
"args",
",",
"distro",
",",
"rlogger",
",",
"keypath",
",",
"keytype",
",",
"dest_dir",
")",
":",
"args_prefix",
"=",
"[",
"'/usr/bin/ceph'",
",",
"'--connect-timeout=25'",
",",
"'--cluster={cluster}'",
".",
"format",
"(",
"clus... | Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir | [
"Get",
"or",
"create",
"the",
"keyring",
"from",
"the",
"mon",
"using",
"the",
"mon",
"keyring",
"by",
"keytype",
"and",
"copy",
"to",
"dest_dir"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L100-L147 | train | 228,280 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | gatherkeys_with_mon | def gatherkeys_with_mon(args, host, dest_dir):
"""
Connect to mon and gather keys if mon is in quorum.
"""
distro = hosts.get(host, username=args.username)
remote_hostname = distro.conn.remote_module.shortname()
dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
if mon_key is None:
LOG.warning("No mon key found in host: %s", host)
return False
mon_name_local = keytype_path_to(args, "mon")
mon_path_local = os.path.join(dest_dir, mon_name_local)
with open(mon_path_local, 'wb') as f:
f.write(mon_key)
rlogger = logging.getLogger(host)
path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
out, err, code = remoto.process.check(
distro.conn,
[
"/usr/bin/ceph",
"--connect-timeout=25",
"--cluster={cluster}".format(
cluster=args.cluster),
"--admin-daemon={asok}".format(
asok=path_asok),
"mon_status"
]
)
if code != 0:
rlogger.error('"ceph mon_status %s" returned %s', host, code)
for line in err:
rlogger.debug(line)
return False
try:
mon_status = json.loads(b''.join(out).decode('utf-8'))
except ValueError:
rlogger.error('"ceph mon_status %s" output was not json', host)
for line in out:
rlogger.error(line)
return False
mon_number = None
mon_map = mon_status.get('monmap')
if mon_map is None:
rlogger.error("could not find mon map for mons on '%s'", host)
return False
mon_quorum = mon_status.get('quorum')
if mon_quorum is None:
rlogger.error("could not find quorum for mons on '%s'" , host)
return False
mon_map_mons = mon_map.get('mons')
if mon_map_mons is None:
rlogger.error("could not find mons in monmap on '%s'", host)
return False
for mon in mon_map_mons:
if mon.get('name') == remote_hostname:
mon_number = mon.get('rank')
break
if mon_number is None:
rlogger.error("could not find '%s' in monmap", remote_hostname)
return False
if not mon_number in mon_quorum:
rlogger.error("Not yet quorum for '%s'", host)
return False
for keytype in ["admin", "mds", "mgr", "osd", "rgw"]:
if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
# We will return failure if we fail to gather any key
rlogger.error("Failed to return '%s' key from host %s", keytype, host)
return False
return True | python | def gatherkeys_with_mon(args, host, dest_dir):
"""
Connect to mon and gather keys if mon is in quorum.
"""
distro = hosts.get(host, username=args.username)
remote_hostname = distro.conn.remote_module.shortname()
dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
if mon_key is None:
LOG.warning("No mon key found in host: %s", host)
return False
mon_name_local = keytype_path_to(args, "mon")
mon_path_local = os.path.join(dest_dir, mon_name_local)
with open(mon_path_local, 'wb') as f:
f.write(mon_key)
rlogger = logging.getLogger(host)
path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
out, err, code = remoto.process.check(
distro.conn,
[
"/usr/bin/ceph",
"--connect-timeout=25",
"--cluster={cluster}".format(
cluster=args.cluster),
"--admin-daemon={asok}".format(
asok=path_asok),
"mon_status"
]
)
if code != 0:
rlogger.error('"ceph mon_status %s" returned %s', host, code)
for line in err:
rlogger.debug(line)
return False
try:
mon_status = json.loads(b''.join(out).decode('utf-8'))
except ValueError:
rlogger.error('"ceph mon_status %s" output was not json', host)
for line in out:
rlogger.error(line)
return False
mon_number = None
mon_map = mon_status.get('monmap')
if mon_map is None:
rlogger.error("could not find mon map for mons on '%s'", host)
return False
mon_quorum = mon_status.get('quorum')
if mon_quorum is None:
rlogger.error("could not find quorum for mons on '%s'" , host)
return False
mon_map_mons = mon_map.get('mons')
if mon_map_mons is None:
rlogger.error("could not find mons in monmap on '%s'", host)
return False
for mon in mon_map_mons:
if mon.get('name') == remote_hostname:
mon_number = mon.get('rank')
break
if mon_number is None:
rlogger.error("could not find '%s' in monmap", remote_hostname)
return False
if not mon_number in mon_quorum:
rlogger.error("Not yet quorum for '%s'", host)
return False
for keytype in ["admin", "mds", "mgr", "osd", "rgw"]:
if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
# We will return failure if we fail to gather any key
rlogger.error("Failed to return '%s' key from host %s", keytype, host)
return False
return True | [
"def",
"gatherkeys_with_mon",
"(",
"args",
",",
"host",
",",
"dest_dir",
")",
":",
"distro",
"=",
"hosts",
".",
"get",
"(",
"host",
",",
"username",
"=",
"args",
".",
"username",
")",
"remote_hostname",
"=",
"distro",
".",
"conn",
".",
"remote_module",
"... | Connect to mon and gather keys if mon is in quorum. | [
"Connect",
"to",
"mon",
"and",
"gather",
"keys",
"if",
"mon",
"is",
"in",
"quorum",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L150-L220 | train | 228,281 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | gatherkeys | def gatherkeys(args):
"""
Gather keys from any mon and store in current working directory.
Backs up keys from previous installs and stores new keys.
"""
oldmask = os.umask(0o77)
try:
try:
tmpd = tempfile.mkdtemp()
LOG.info("Storing keys in temp directory %s", tmpd)
sucess = False
for host in args.mon:
sucess = gatherkeys_with_mon(args, host, tmpd)
if sucess:
break
if not sucess:
LOG.error("Failed to connect to host:%s" ,', '.join(args.mon))
raise RuntimeError('Failed to connect any mon')
had_error = False
date_string = time.strftime("%Y%m%d%H%M%S")
for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]:
filename = keytype_path_to(args, keytype)
tmp_path = os.path.join(tmpd, filename)
if not os.path.exists(tmp_path):
LOG.error("No key retrived for '%s'" , keytype)
had_error = True
continue
if not os.path.exists(filename):
LOG.info("Storing %s" % (filename))
shutil.move(tmp_path, filename)
continue
if _keyring_equivalent(tmp_path, filename):
LOG.info("keyring '%s' already exists" , filename)
continue
backup_keyring = "%s-%s" % (filename, date_string)
LOG.info("Replacing '%s' and backing up old key as '%s'", filename, backup_keyring)
shutil.copy(filename, backup_keyring)
shutil.move(tmp_path, filename)
if had_error:
raise RuntimeError('Failed to get all key types')
finally:
LOG.info("Destroy temp directory %s" %(tmpd))
shutil.rmtree(tmpd)
finally:
os.umask(oldmask) | python | def gatherkeys(args):
"""
Gather keys from any mon and store in current working directory.
Backs up keys from previous installs and stores new keys.
"""
oldmask = os.umask(0o77)
try:
try:
tmpd = tempfile.mkdtemp()
LOG.info("Storing keys in temp directory %s", tmpd)
sucess = False
for host in args.mon:
sucess = gatherkeys_with_mon(args, host, tmpd)
if sucess:
break
if not sucess:
LOG.error("Failed to connect to host:%s" ,', '.join(args.mon))
raise RuntimeError('Failed to connect any mon')
had_error = False
date_string = time.strftime("%Y%m%d%H%M%S")
for keytype in ["admin", "mds", "mgr", "mon", "osd", "rgw"]:
filename = keytype_path_to(args, keytype)
tmp_path = os.path.join(tmpd, filename)
if not os.path.exists(tmp_path):
LOG.error("No key retrived for '%s'" , keytype)
had_error = True
continue
if not os.path.exists(filename):
LOG.info("Storing %s" % (filename))
shutil.move(tmp_path, filename)
continue
if _keyring_equivalent(tmp_path, filename):
LOG.info("keyring '%s' already exists" , filename)
continue
backup_keyring = "%s-%s" % (filename, date_string)
LOG.info("Replacing '%s' and backing up old key as '%s'", filename, backup_keyring)
shutil.copy(filename, backup_keyring)
shutil.move(tmp_path, filename)
if had_error:
raise RuntimeError('Failed to get all key types')
finally:
LOG.info("Destroy temp directory %s" %(tmpd))
shutil.rmtree(tmpd)
finally:
os.umask(oldmask) | [
"def",
"gatherkeys",
"(",
"args",
")",
":",
"oldmask",
"=",
"os",
".",
"umask",
"(",
"0o77",
")",
"try",
":",
"try",
":",
"tmpd",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"LOG",
".",
"info",
"(",
"\"Storing keys in temp directory %s\"",
",",
"tmpd",
... | Gather keys from any mon and store in current working directory.
Backs up keys from previous installs and stores new keys. | [
"Gather",
"keys",
"from",
"any",
"mon",
"and",
"store",
"in",
"current",
"working",
"directory",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L223-L268 | train | 228,282 |
ceph/ceph-deploy | ceph_deploy/gatherkeys.py | make | def make(parser):
"""
Gather authentication keys for provisioning new nodes.
"""
parser.add_argument(
'mon',
metavar='HOST',
nargs='+',
help='monitor host to pull keys from',
)
parser.set_defaults(
func=gatherkeys,
) | python | def make(parser):
"""
Gather authentication keys for provisioning new nodes.
"""
parser.add_argument(
'mon',
metavar='HOST',
nargs='+',
help='monitor host to pull keys from',
)
parser.set_defaults(
func=gatherkeys,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'mon'",
",",
"metavar",
"=",
"'HOST'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'monitor host to pull keys from'",
",",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
... | Gather authentication keys for provisioning new nodes. | [
"Gather",
"authentication",
"keys",
"for",
"provisioning",
"new",
"nodes",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/gatherkeys.py#L272-L284 | train | 228,283 |
ceph/ceph-deploy | ceph_deploy/hosts/__init__.py | get | def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False,
callbacks=None):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
:params callbacks: A list of callables that accept one argument (the actual
module that contains the connection) that will be
called, in order at the end of the instantiation of the
module.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt']
module.is_deb = module.normalized_name in ['debian', 'ubuntu']
module.is_pkgtarxz = module.normalized_name in ['arch']
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
# execute each callback if any
if callbacks:
for c in callbacks:
c(module)
return module | python | def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False,
callbacks=None):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
:params callbacks: A list of callables that accept one argument (the actual
module that contains the connection) that will be
called, in order at the end of the instantiation of the
module.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt']
module.is_deb = module.normalized_name in ['debian', 'ubuntu']
module.is_pkgtarxz = module.normalized_name in ['arch']
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
# execute each callback if any
if callbacks:
for c in callbacks:
c(module)
return module | [
"def",
"get",
"(",
"hostname",
",",
"username",
"=",
"None",
",",
"fallback",
"=",
"None",
",",
"detect_sudo",
"=",
"True",
",",
"use_rhceph",
"=",
"False",
",",
"callbacks",
"=",
"None",
")",
":",
"conn",
"=",
"get_connection",
"(",
"hostname",
",",
"... | Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
to that module defining the information it found from the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
:params callbacks: A list of callables that accept one argument (the actual
module that contains the connection) that will be
called, in order at the end of the instantiation of the
module. | [
"Retrieve",
"the",
"module",
"that",
"matches",
"the",
"distribution",
"of",
"a",
"hostname",
".",
"This",
"function",
"will",
"connect",
"to",
"that",
"host",
"and",
"retrieve",
"the",
"distribution",
"information",
"then",
"return",
"the",
"appropriate",
"modu... | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/__init__.py#L16-L84 | train | 228,284 |
ceph/ceph-deploy | ceph_deploy/connection.py | get_connection | def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True):
"""
A very simple helper, meant to return a connection
that will know about the need to use sudo.
"""
if username:
hostname = "%s@%s" % (username, hostname)
try:
conn = remoto.Connection(
hostname,
logger=logger,
threads=threads,
detect_sudo=detect_sudo,
)
# Set a timeout value in seconds to disconnect and move on
# if no data is sent back.
conn.global_timeout = 300
logger.debug("connected to host: %s " % hostname)
return conn
except Exception as error:
msg = "connecting to host: %s " % hostname
errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
raise RuntimeError(msg + errors) | python | def get_connection(hostname, username, logger, threads=5, use_sudo=None, detect_sudo=True):
"""
A very simple helper, meant to return a connection
that will know about the need to use sudo.
"""
if username:
hostname = "%s@%s" % (username, hostname)
try:
conn = remoto.Connection(
hostname,
logger=logger,
threads=threads,
detect_sudo=detect_sudo,
)
# Set a timeout value in seconds to disconnect and move on
# if no data is sent back.
conn.global_timeout = 300
logger.debug("connected to host: %s " % hostname)
return conn
except Exception as error:
msg = "connecting to host: %s " % hostname
errors = "resulted in errors: %s %s" % (error.__class__.__name__, error)
raise RuntimeError(msg + errors) | [
"def",
"get_connection",
"(",
"hostname",
",",
"username",
",",
"logger",
",",
"threads",
"=",
"5",
",",
"use_sudo",
"=",
"None",
",",
"detect_sudo",
"=",
"True",
")",
":",
"if",
"username",
":",
"hostname",
"=",
"\"%s@%s\"",
"%",
"(",
"username",
",",
... | A very simple helper, meant to return a connection
that will know about the need to use sudo. | [
"A",
"very",
"simple",
"helper",
"meant",
"to",
"return",
"a",
"connection",
"that",
"will",
"know",
"about",
"the",
"need",
"to",
"use",
"sudo",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/connection.py#L5-L29 | train | 228,285 |
ceph/ceph-deploy | ceph_deploy/connection.py | get_local_connection | def get_local_connection(logger, use_sudo=False):
"""
Helper for local connections that are sometimes needed to operate
on local hosts
"""
return get_connection(
socket.gethostname(), # cannot rely on 'localhost' here
None,
logger=logger,
threads=1,
use_sudo=use_sudo,
detect_sudo=False
) | python | def get_local_connection(logger, use_sudo=False):
"""
Helper for local connections that are sometimes needed to operate
on local hosts
"""
return get_connection(
socket.gethostname(), # cannot rely on 'localhost' here
None,
logger=logger,
threads=1,
use_sudo=use_sudo,
detect_sudo=False
) | [
"def",
"get_local_connection",
"(",
"logger",
",",
"use_sudo",
"=",
"False",
")",
":",
"return",
"get_connection",
"(",
"socket",
".",
"gethostname",
"(",
")",
",",
"# cannot rely on 'localhost' here",
"None",
",",
"logger",
"=",
"logger",
",",
"threads",
"=",
... | Helper for local connections that are sometimes needed to operate
on local hosts | [
"Helper",
"for",
"local",
"connections",
"that",
"are",
"sometimes",
"needed",
"to",
"operate",
"on",
"local",
"hosts"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/connection.py#L32-L44 | train | 228,286 |
ceph/ceph-deploy | ceph_deploy/mgr.py | make | def make(parser):
"""
Ceph MGR daemon management
"""
mgr_parser = parser.add_subparsers(dest='subcommand')
mgr_parser.required = True
mgr_create = mgr_parser.add_parser(
'create',
help='Deploy Ceph MGR on remote host(s)'
)
mgr_create.add_argument(
'mgr',
metavar='HOST[:NAME]',
nargs='+',
type=colon_separated,
help='host (and optionally the daemon name) to deploy on',
)
parser.set_defaults(
func=mgr,
) | python | def make(parser):
"""
Ceph MGR daemon management
"""
mgr_parser = parser.add_subparsers(dest='subcommand')
mgr_parser.required = True
mgr_create = mgr_parser.add_parser(
'create',
help='Deploy Ceph MGR on remote host(s)'
)
mgr_create.add_argument(
'mgr',
metavar='HOST[:NAME]',
nargs='+',
type=colon_separated,
help='host (and optionally the daemon name) to deploy on',
)
parser.set_defaults(
func=mgr,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"mgr_parser",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'subcommand'",
")",
"mgr_parser",
".",
"required",
"=",
"True",
"mgr_create",
"=",
"mgr_parser",
".",
"add_parser",
"(",
"'create'",
",",
"help",
... | Ceph MGR daemon management | [
"Ceph",
"MGR",
"daemon",
"management"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/mgr.py#L206-L226 | train | 228,287 |
ceph/ceph-deploy | ceph_deploy/pkg.py | make | def make(parser):
"""
Manage packages on remote hosts.
"""
action = parser.add_mutually_exclusive_group()
action.add_argument(
'--install',
metavar='PKG(s)',
help='Comma-separated package(s) to install',
)
action.add_argument(
'--remove',
metavar='PKG(s)',
help='Comma-separated package(s) to remove',
)
parser.add_argument(
'hosts',
nargs='+',
)
parser.set_defaults(
func=pkg,
) | python | def make(parser):
"""
Manage packages on remote hosts.
"""
action = parser.add_mutually_exclusive_group()
action.add_argument(
'--install',
metavar='PKG(s)',
help='Comma-separated package(s) to install',
)
action.add_argument(
'--remove',
metavar='PKG(s)',
help='Comma-separated package(s) to remove',
)
parser.add_argument(
'hosts',
nargs='+',
)
parser.set_defaults(
func=pkg,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"action",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
")",
"action",
".",
"add_argument",
"(",
"'--install'",
",",
"metavar",
"=",
"'PKG(s)'",
",",
"help",
"=",
"'Comma-separated package(s) to install'",
",",
... | Manage packages on remote hosts. | [
"Manage",
"packages",
"on",
"remote",
"hosts",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/pkg.py#L60-L86 | train | 228,288 |
ceph/ceph-deploy | ceph_deploy/osd.py | get_bootstrap_osd_key | def get_bootstrap_osd_key(cluster):
"""
Read the bootstrap-osd key for `cluster`.
"""
path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
try:
with open(path, 'rb') as f:
return f.read()
except IOError:
raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'') | python | def get_bootstrap_osd_key(cluster):
"""
Read the bootstrap-osd key for `cluster`.
"""
path = '{cluster}.bootstrap-osd.keyring'.format(cluster=cluster)
try:
with open(path, 'rb') as f:
return f.read()
except IOError:
raise RuntimeError('bootstrap-osd keyring not found; run \'gatherkeys\'') | [
"def",
"get_bootstrap_osd_key",
"(",
"cluster",
")",
":",
"path",
"=",
"'{cluster}.bootstrap-osd.keyring'",
".",
"format",
"(",
"cluster",
"=",
"cluster",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"f",
".",
... | Read the bootstrap-osd key for `cluster`. | [
"Read",
"the",
"bootstrap",
"-",
"osd",
"key",
"for",
"cluster",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L17-L26 | train | 228,289 |
ceph/ceph-deploy | ceph_deploy/osd.py | create_osd_keyring | def create_osd_keyring(conn, cluster, key):
"""
Run on osd node, writes the bootstrap key if not there yet.
"""
logger = conn.logger
path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
cluster=cluster,
)
if not conn.remote_module.path_exists(path):
logger.warning('osd keyring does not exist yet, creating one')
conn.remote_module.write_keyring(path, key) | python | def create_osd_keyring(conn, cluster, key):
"""
Run on osd node, writes the bootstrap key if not there yet.
"""
logger = conn.logger
path = '/var/lib/ceph/bootstrap-osd/{cluster}.keyring'.format(
cluster=cluster,
)
if not conn.remote_module.path_exists(path):
logger.warning('osd keyring does not exist yet, creating one')
conn.remote_module.write_keyring(path, key) | [
"def",
"create_osd_keyring",
"(",
"conn",
",",
"cluster",
",",
"key",
")",
":",
"logger",
"=",
"conn",
".",
"logger",
"path",
"=",
"'/var/lib/ceph/bootstrap-osd/{cluster}.keyring'",
".",
"format",
"(",
"cluster",
"=",
"cluster",
",",
")",
"if",
"not",
"conn",
... | Run on osd node, writes the bootstrap key if not there yet. | [
"Run",
"on",
"osd",
"node",
"writes",
"the",
"bootstrap",
"key",
"if",
"not",
"there",
"yet",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L29-L39 | train | 228,290 |
ceph/ceph-deploy | ceph_deploy/osd.py | osd_tree | def osd_tree(conn, cluster):
"""
Check the status of an OSD. Make sure all are up and in
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108
"""
ceph_executable = system.executable_path(conn, 'ceph')
command = [
ceph_executable,
'--cluster={cluster}'.format(cluster=cluster),
'osd',
'tree',
'--format=json',
]
out, err, code = remoto.process.check(
conn,
command,
)
try:
loaded_json = json.loads(b''.join(out).decode('utf-8'))
# convert boolean strings to actual booleans because
# --format=json fails to do this properly
for k, v in loaded_json.items():
if v == 'true':
loaded_json[k] = True
elif v == 'false':
loaded_json[k] = False
return loaded_json
except ValueError:
return {} | python | def osd_tree(conn, cluster):
"""
Check the status of an OSD. Make sure all are up and in
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108
"""
ceph_executable = system.executable_path(conn, 'ceph')
command = [
ceph_executable,
'--cluster={cluster}'.format(cluster=cluster),
'osd',
'tree',
'--format=json',
]
out, err, code = remoto.process.check(
conn,
command,
)
try:
loaded_json = json.loads(b''.join(out).decode('utf-8'))
# convert boolean strings to actual booleans because
# --format=json fails to do this properly
for k, v in loaded_json.items():
if v == 'true':
loaded_json[k] = True
elif v == 'false':
loaded_json[k] = False
return loaded_json
except ValueError:
return {} | [
"def",
"osd_tree",
"(",
"conn",
",",
"cluster",
")",
":",
"ceph_executable",
"=",
"system",
".",
"executable_path",
"(",
"conn",
",",
"'ceph'",
")",
"command",
"=",
"[",
"ceph_executable",
",",
"'--cluster={cluster}'",
".",
"format",
"(",
"cluster",
"=",
"cl... | Check the status of an OSD. Make sure all are up and in
What good output would look like::
{
"epoch": 8,
"num_osds": 1,
"num_up_osds": 1,
"num_in_osds": "1",
"full": "false",
"nearfull": "false"
}
Note how the booleans are actually strings, so we need to take that into
account and fix it before returning the dictionary. Issue #8108 | [
"Check",
"the",
"status",
"of",
"an",
"OSD",
".",
"Make",
"sure",
"all",
"are",
"up",
"and",
"in"
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L42-L85 | train | 228,291 |
ceph/ceph-deploy | ceph_deploy/osd.py | catch_osd_errors | def catch_osd_errors(conn, logger, args):
"""
Look for possible issues when checking the status of an OSD and
report them back to the user.
"""
logger.info('checking OSD status...')
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
difference = osds - up_osds
logger.warning('there %s %d OSD%s down' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if osds > in_osds:
difference = osds - in_osds
logger.warning('there %s %d OSD%s out' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if full:
logger.warning('OSDs are full!')
if nearfull:
logger.warning('OSDs are near full!') | python | def catch_osd_errors(conn, logger, args):
"""
Look for possible issues when checking the status of an OSD and
report them back to the user.
"""
logger.info('checking OSD status...')
status = osd_status_check(conn, args.cluster)
osds = int(status.get('num_osds', 0))
up_osds = int(status.get('num_up_osds', 0))
in_osds = int(status.get('num_in_osds', 0))
full = status.get('full', False)
nearfull = status.get('nearfull', False)
if osds > up_osds:
difference = osds - up_osds
logger.warning('there %s %d OSD%s down' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if osds > in_osds:
difference = osds - in_osds
logger.warning('there %s %d OSD%s out' % (
['is', 'are'][difference != 1],
difference,
"s"[difference == 1:])
)
if full:
logger.warning('OSDs are full!')
if nearfull:
logger.warning('OSDs are near full!') | [
"def",
"catch_osd_errors",
"(",
"conn",
",",
"logger",
",",
"args",
")",
":",
"logger",
".",
"info",
"(",
"'checking OSD status...'",
")",
"status",
"=",
"osd_status_check",
"(",
"conn",
",",
"args",
".",
"cluster",
")",
"osds",
"=",
"int",
"(",
"status",
... | Look for possible issues when checking the status of an OSD and
report them back to the user. | [
"Look",
"for",
"possible",
"issues",
"when",
"checking",
"the",
"status",
"of",
"an",
"OSD",
"and",
"report",
"them",
"back",
"to",
"the",
"user",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L141-L174 | train | 228,292 |
ceph/ceph-deploy | ceph_deploy/osd.py | create_osd | def create_osd(
conn,
cluster,
data,
journal,
zap,
fs_type,
dmcrypt,
dmcrypt_dir,
storetype,
block_wal,
block_db,
**kw):
"""
Run on osd node, creates an OSD from a data disk.
"""
ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
args = [
ceph_volume_executable,
'--cluster', cluster,
'lvm',
'create',
'--%s' % storetype,
'--data', data
]
if zap:
LOG.warning('zapping is no longer supported when preparing')
if dmcrypt:
args.append('--dmcrypt')
# TODO: re-enable dmcrypt support once ceph-volume grows it
LOG.warning('dmcrypt is currently not supported')
if storetype == 'bluestore':
if block_wal:
args.append('--block.wal')
args.append(block_wal)
if block_db:
args.append('--block.db')
args.append(block_db)
elif storetype == 'filestore':
if not journal:
raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
args.append('--journal')
args.append(journal)
if kw.get('debug'):
remoto.process.run(
conn,
args,
extend_env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
conn,
args
) | python | def create_osd(
conn,
cluster,
data,
journal,
zap,
fs_type,
dmcrypt,
dmcrypt_dir,
storetype,
block_wal,
block_db,
**kw):
"""
Run on osd node, creates an OSD from a data disk.
"""
ceph_volume_executable = system.executable_path(conn, 'ceph-volume')
args = [
ceph_volume_executable,
'--cluster', cluster,
'lvm',
'create',
'--%s' % storetype,
'--data', data
]
if zap:
LOG.warning('zapping is no longer supported when preparing')
if dmcrypt:
args.append('--dmcrypt')
# TODO: re-enable dmcrypt support once ceph-volume grows it
LOG.warning('dmcrypt is currently not supported')
if storetype == 'bluestore':
if block_wal:
args.append('--block.wal')
args.append(block_wal)
if block_db:
args.append('--block.db')
args.append(block_db)
elif storetype == 'filestore':
if not journal:
raise RuntimeError('A journal lv or GPT partition must be specified when using filestore')
args.append('--journal')
args.append(journal)
if kw.get('debug'):
remoto.process.run(
conn,
args,
extend_env={'CEPH_VOLUME_DEBUG': '1'}
)
else:
remoto.process.run(
conn,
args
) | [
"def",
"create_osd",
"(",
"conn",
",",
"cluster",
",",
"data",
",",
"journal",
",",
"zap",
",",
"fs_type",
",",
"dmcrypt",
",",
"dmcrypt_dir",
",",
"storetype",
",",
"block_wal",
",",
"block_db",
",",
"*",
"*",
"kw",
")",
":",
"ceph_volume_executable",
"... | Run on osd node, creates an OSD from a data disk. | [
"Run",
"on",
"osd",
"node",
"creates",
"an",
"OSD",
"from",
"a",
"data",
"disk",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L177-L233 | train | 228,293 |
ceph/ceph-deploy | ceph_deploy/osd.py | make | def make(parser):
"""
Prepare a data disk on remote host.
"""
sub_command_help = dedent("""
Create OSDs from a data disk on a remote host:
ceph-deploy osd create {node} --data /path/to/device
For bluestore, optional devices can be used::
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
For filestore, the journal must be specified, as well as the objectstore::
ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
For data devices, it can be an existing logical volume in the format of:
vg/lv, or a device. For other OSD components like wal, db, and journal, it
can be logical volume (in vg/lv format) or it must be a GPT partition.
"""
)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = sub_command_help
osd_parser = parser.add_subparsers(dest='subcommand')
osd_parser.required = True
osd_list = osd_parser.add_parser(
'list',
help='List OSD info from remote host(s)'
)
osd_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='remote host(s) to list OSDs from'
)
osd_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
osd_create = osd_parser.add_parser(
'create',
help='Create new Ceph OSD daemon by preparing and activating a device'
)
osd_create.add_argument(
'--data',
metavar='DATA',
help='The OSD data logical volume (vg/lv) or absolute path to device'
)
osd_create.add_argument(
'--journal',
help='Logical Volume (vg/lv) or path to GPT partition',
)
osd_create.add_argument(
'--zap-disk',
action='store_true',
help='DEPRECATED - cannot zap when creating an OSD'
)
osd_create.add_argument(
'--fs-type',
metavar='FS_TYPE',
choices=['xfs',
'btrfs'
],
default='xfs',
help='filesystem to use to format DEVICE (xfs, btrfs)',
)
osd_create.add_argument(
'--dmcrypt',
action='store_true',
help='use dm-crypt on DEVICE',
)
osd_create.add_argument(
'--dmcrypt-key-dir',
metavar='KEYDIR',
default='/etc/ceph/dmcrypt-keys',
help='directory where dm-crypt keys are stored',
)
osd_create.add_argument(
'--filestore',
action='store_true', default=None,
help='filestore objectstore',
)
osd_create.add_argument(
'--bluestore',
action='store_true', default=None,
help='bluestore objectstore',
)
osd_create.add_argument(
'--block-db',
default=None,
help='bluestore block.db path'
)
osd_create.add_argument(
'--block-wal',
default=None,
help='bluestore block.wal path'
)
osd_create.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote host to connect'
)
osd_create.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=osd,
) | python | def make(parser):
"""
Prepare a data disk on remote host.
"""
sub_command_help = dedent("""
Create OSDs from a data disk on a remote host:
ceph-deploy osd create {node} --data /path/to/device
For bluestore, optional devices can be used::
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
For filestore, the journal must be specified, as well as the objectstore::
ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
For data devices, it can be an existing logical volume in the format of:
vg/lv, or a device. For other OSD components like wal, db, and journal, it
can be logical volume (in vg/lv format) or it must be a GPT partition.
"""
)
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.description = sub_command_help
osd_parser = parser.add_subparsers(dest='subcommand')
osd_parser.required = True
osd_list = osd_parser.add_parser(
'list',
help='List OSD info from remote host(s)'
)
osd_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='remote host(s) to list OSDs from'
)
osd_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
osd_create = osd_parser.add_parser(
'create',
help='Create new Ceph OSD daemon by preparing and activating a device'
)
osd_create.add_argument(
'--data',
metavar='DATA',
help='The OSD data logical volume (vg/lv) or absolute path to device'
)
osd_create.add_argument(
'--journal',
help='Logical Volume (vg/lv) or path to GPT partition',
)
osd_create.add_argument(
'--zap-disk',
action='store_true',
help='DEPRECATED - cannot zap when creating an OSD'
)
osd_create.add_argument(
'--fs-type',
metavar='FS_TYPE',
choices=['xfs',
'btrfs'
],
default='xfs',
help='filesystem to use to format DEVICE (xfs, btrfs)',
)
osd_create.add_argument(
'--dmcrypt',
action='store_true',
help='use dm-crypt on DEVICE',
)
osd_create.add_argument(
'--dmcrypt-key-dir',
metavar='KEYDIR',
default='/etc/ceph/dmcrypt-keys',
help='directory where dm-crypt keys are stored',
)
osd_create.add_argument(
'--filestore',
action='store_true', default=None,
help='filestore objectstore',
)
osd_create.add_argument(
'--bluestore',
action='store_true', default=None,
help='bluestore objectstore',
)
osd_create.add_argument(
'--block-db',
default=None,
help='bluestore block.db path'
)
osd_create.add_argument(
'--block-wal',
default=None,
help='bluestore block.wal path'
)
osd_create.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote host to connect'
)
osd_create.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=osd,
) | [
"def",
"make",
"(",
"parser",
")",
":",
"sub_command_help",
"=",
"dedent",
"(",
"\"\"\"\n Create OSDs from a data disk on a remote host:\n\n ceph-deploy osd create {node} --data /path/to/device\n\n For bluestore, optional devices can be used::\n\n ceph-deploy osd create {node... | Prepare a data disk on remote host. | [
"Prepare",
"a",
"data",
"disk",
"on",
"remote",
"host",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L445-L561 | train | 228,294 |
ceph/ceph-deploy | ceph_deploy/osd.py | make_disk | def make_disk(parser):
"""
Manage disks on a remote host.
"""
disk_parser = parser.add_subparsers(dest='subcommand')
disk_parser.required = True
disk_zap = disk_parser.add_parser(
'zap',
help='destroy existing data and filesystem on LV or partition',
)
disk_zap.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote HOST(s) to connect'
)
disk_zap.add_argument(
'disk',
nargs='+',
metavar='DISK',
help='Disk(s) to zap'
)
disk_zap.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
disk_list = disk_parser.add_parser(
'list',
help='List disk info from remote host(s)'
)
disk_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='Remote HOST(s) to list OSDs from'
)
disk_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=disk,
) | python | def make_disk(parser):
"""
Manage disks on a remote host.
"""
disk_parser = parser.add_subparsers(dest='subcommand')
disk_parser.required = True
disk_zap = disk_parser.add_parser(
'zap',
help='destroy existing data and filesystem on LV or partition',
)
disk_zap.add_argument(
'host',
nargs='?',
metavar='HOST',
help='Remote HOST(s) to connect'
)
disk_zap.add_argument(
'disk',
nargs='+',
metavar='DISK',
help='Disk(s) to zap'
)
disk_zap.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
disk_list = disk_parser.add_parser(
'list',
help='List disk info from remote host(s)'
)
disk_list.add_argument(
'host',
nargs='+',
metavar='HOST',
help='Remote HOST(s) to list OSDs from'
)
disk_list.add_argument(
'--debug',
action='store_true',
help='Enable debug mode on remote ceph-volume calls',
)
parser.set_defaults(
func=disk,
) | [
"def",
"make_disk",
"(",
"parser",
")",
":",
"disk_parser",
"=",
"parser",
".",
"add_subparsers",
"(",
"dest",
"=",
"'subcommand'",
")",
"disk_parser",
".",
"required",
"=",
"True",
"disk_zap",
"=",
"disk_parser",
".",
"add_parser",
"(",
"'zap'",
",",
"help"... | Manage disks on a remote host. | [
"Manage",
"disks",
"on",
"a",
"remote",
"host",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/osd.py#L565-L610 | train | 228,295 |
ceph/ceph-deploy | ceph_deploy/hosts/centos/install.py | repository_url_part | def repository_url_part(distro):
"""
Historically everything CentOS, RHEL, and Scientific has been mapped to
`el6` urls, but as we are adding repositories for `rhel`, the URLs should
map correctly to, say, `rhel6` or `rhel7`.
This function looks into the `distro` object and determines the right url
part for the given distro, falling back to `el6` when all else fails.
Specifically to work around the issue of CentOS vs RHEL::
>>> import platform
>>> platform.linux_distribution()
('Red Hat Enterprise Linux Server', '7.0', 'Maipo')
"""
if distro.normalized_release.int_major >= 6:
if distro.normalized_name == 'redhat':
return 'rhel' + distro.normalized_release.major
if distro.normalized_name in ['centos', 'scientific', 'oracle', 'virtuozzo']:
return 'el' + distro.normalized_release.major
return 'el6' | python | def repository_url_part(distro):
"""
Historically everything CentOS, RHEL, and Scientific has been mapped to
`el6` urls, but as we are adding repositories for `rhel`, the URLs should
map correctly to, say, `rhel6` or `rhel7`.
This function looks into the `distro` object and determines the right url
part for the given distro, falling back to `el6` when all else fails.
Specifically to work around the issue of CentOS vs RHEL::
>>> import platform
>>> platform.linux_distribution()
('Red Hat Enterprise Linux Server', '7.0', 'Maipo')
"""
if distro.normalized_release.int_major >= 6:
if distro.normalized_name == 'redhat':
return 'rhel' + distro.normalized_release.major
if distro.normalized_name in ['centos', 'scientific', 'oracle', 'virtuozzo']:
return 'el' + distro.normalized_release.major
return 'el6' | [
"def",
"repository_url_part",
"(",
"distro",
")",
":",
"if",
"distro",
".",
"normalized_release",
".",
"int_major",
">=",
"6",
":",
"if",
"distro",
".",
"normalized_name",
"==",
"'redhat'",
":",
"return",
"'rhel'",
"+",
"distro",
".",
"normalized_release",
"."... | Historically everything CentOS, RHEL, and Scientific has been mapped to
`el6` urls, but as we are adding repositories for `rhel`, the URLs should
map correctly to, say, `rhel6` or `rhel7`.
This function looks into the `distro` object and determines the right url
part for the given distro, falling back to `el6` when all else fails.
Specifically to work around the issue of CentOS vs RHEL::
>>> import platform
>>> platform.linux_distribution()
('Red Hat Enterprise Linux Server', '7.0', 'Maipo') | [
"Historically",
"everything",
"CentOS",
"RHEL",
"and",
"Scientific",
"has",
"been",
"mapped",
"to",
"el6",
"urls",
"but",
"as",
"we",
"are",
"adding",
"repositories",
"for",
"rhel",
"the",
"URLs",
"should",
"map",
"correctly",
"to",
"say",
"rhel6",
"or",
"rh... | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/centos/install.py#L19-L41 | train | 228,296 |
ceph/ceph-deploy | ceph_deploy/install.py | sanitize_args | def sanitize_args(args):
"""
args may need a bunch of logic to set proper defaults that argparse is
not well suited for.
"""
if args.release is None:
args.release = 'nautilus'
args.default_release = True
# XXX This whole dance is because --stable is getting deprecated
if args.stable is not None:
LOG.warning('the --stable flag is deprecated, use --release instead')
args.release = args.stable
# XXX Tango ends here.
return args | python | def sanitize_args(args):
"""
args may need a bunch of logic to set proper defaults that argparse is
not well suited for.
"""
if args.release is None:
args.release = 'nautilus'
args.default_release = True
# XXX This whole dance is because --stable is getting deprecated
if args.stable is not None:
LOG.warning('the --stable flag is deprecated, use --release instead')
args.release = args.stable
# XXX Tango ends here.
return args | [
"def",
"sanitize_args",
"(",
"args",
")",
":",
"if",
"args",
".",
"release",
"is",
"None",
":",
"args",
".",
"release",
"=",
"'nautilus'",
"args",
".",
"default_release",
"=",
"True",
"# XXX This whole dance is because --stable is getting deprecated",
"if",
"args",
... | args may need a bunch of logic to set proper defaults that argparse is
not well suited for. | [
"args",
"may",
"need",
"a",
"bunch",
"of",
"logic",
"to",
"set",
"proper",
"defaults",
"that",
"argparse",
"is",
"not",
"well",
"suited",
"for",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/install.py#L14-L29 | train | 228,297 |
ceph/ceph-deploy | ceph_deploy/install.py | should_use_custom_repo | def should_use_custom_repo(args, cd_conf, repo_url):
"""
A boolean to determine the logic needed to proceed with a custom repo
installation instead of cramming everything nect to the logic operator.
"""
if repo_url:
# repo_url signals a CLI override, return False immediately
return False
if cd_conf:
if cd_conf.has_repos:
has_valid_release = args.release in cd_conf.get_repos()
has_default_repo = cd_conf.get_default_repo()
if has_valid_release or has_default_repo:
return True
return False | python | def should_use_custom_repo(args, cd_conf, repo_url):
"""
A boolean to determine the logic needed to proceed with a custom repo
installation instead of cramming everything nect to the logic operator.
"""
if repo_url:
# repo_url signals a CLI override, return False immediately
return False
if cd_conf:
if cd_conf.has_repos:
has_valid_release = args.release in cd_conf.get_repos()
has_default_repo = cd_conf.get_default_repo()
if has_valid_release or has_default_repo:
return True
return False | [
"def",
"should_use_custom_repo",
"(",
"args",
",",
"cd_conf",
",",
"repo_url",
")",
":",
"if",
"repo_url",
":",
"# repo_url signals a CLI override, return False immediately",
"return",
"False",
"if",
"cd_conf",
":",
"if",
"cd_conf",
".",
"has_repos",
":",
"has_valid_r... | A boolean to determine the logic needed to proceed with a custom repo
installation instead of cramming everything nect to the logic operator. | [
"A",
"boolean",
"to",
"determine",
"the",
"logic",
"needed",
"to",
"proceed",
"with",
"a",
"custom",
"repo",
"installation",
"instead",
"of",
"cramming",
"everything",
"nect",
"to",
"the",
"logic",
"operator",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/install.py#L215-L229 | train | 228,298 |
ceph/ceph-deploy | ceph_deploy/install.py | make_uninstall | def make_uninstall(parser):
"""
Remove Ceph packages from remote hosts.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to uninstall Ceph from',
)
parser.set_defaults(
func=uninstall,
) | python | def make_uninstall(parser):
"""
Remove Ceph packages from remote hosts.
"""
parser.add_argument(
'host',
metavar='HOST',
nargs='+',
help='hosts to uninstall Ceph from',
)
parser.set_defaults(
func=uninstall,
) | [
"def",
"make_uninstall",
"(",
"parser",
")",
":",
"parser",
".",
"add_argument",
"(",
"'host'",
",",
"metavar",
"=",
"'HOST'",
",",
"nargs",
"=",
"'+'",
",",
"help",
"=",
"'hosts to uninstall Ceph from'",
",",
")",
"parser",
".",
"set_defaults",
"(",
"func",... | Remove Ceph packages from remote hosts. | [
"Remove",
"Ceph",
"packages",
"from",
"remote",
"hosts",
"."
] | 86943fcc454cd4c99a86e3493e9e93a59c661fef | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/install.py#L626-L638 | train | 228,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.