repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
theelous3/multio | multio/__init__.py | SocketWrapper.wrap | def wrap(cls, meth):
'''
Wraps a connection opening method in this class.
'''
async def inner(*args, **kwargs):
sock = await meth(*args, **kwargs)
return cls(sock)
return inner | python | def wrap(cls, meth):
'''
Wraps a connection opening method in this class.
'''
async def inner(*args, **kwargs):
sock = await meth(*args, **kwargs)
return cls(sock)
return inner | [
"def",
"wrap",
"(",
"cls",
",",
"meth",
")",
":",
"async",
"def",
"inner",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"sock",
"=",
"await",
"meth",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"cls",
"(",
"sock",
")",
"... | Wraps a connection opening method in this class. | [
"Wraps",
"a",
"connection",
"opening",
"method",
"in",
"this",
"class",
"."
] | 018e4a9f78d5f4e78608a1a1537000b5fd778bbe | https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/__init__.py#L112-L121 | train | 22,800 |
theelous3/multio | multio/__init__.py | Event.set | async def set(self, *args, **kwargs):
'''
Sets the value of the event.
'''
return await _maybe_await(self.event.set(*args, **kwargs)) | python | async def set(self, *args, **kwargs):
'''
Sets the value of the event.
'''
return await _maybe_await(self.event.set(*args, **kwargs)) | [
"async",
"def",
"set",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"await",
"_maybe_await",
"(",
"self",
".",
"event",
".",
"set",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
")"
] | Sets the value of the event. | [
"Sets",
"the",
"value",
"of",
"the",
"event",
"."
] | 018e4a9f78d5f4e78608a1a1537000b5fd778bbe | https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/__init__.py#L168-L172 | train | 22,801 |
theelous3/multio | multio/__init__.py | _AsyncLibManager.register | def register(self, library: str, cbl: Callable[['_AsyncLib'], None]):
'''
Registers a callable to set up a library.
'''
self._handlers[library] = cbl | python | def register(self, library: str, cbl: Callable[['_AsyncLib'], None]):
'''
Registers a callable to set up a library.
'''
self._handlers[library] = cbl | [
"def",
"register",
"(",
"self",
",",
"library",
":",
"str",
",",
"cbl",
":",
"Callable",
"[",
"[",
"'_AsyncLib'",
"]",
",",
"None",
"]",
")",
":",
"self",
".",
"_handlers",
"[",
"library",
"]",
"=",
"cbl"
] | Registers a callable to set up a library. | [
"Registers",
"a",
"callable",
"to",
"set",
"up",
"a",
"library",
"."
] | 018e4a9f78d5f4e78608a1a1537000b5fd778bbe | https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/__init__.py#L253-L257 | train | 22,802 |
theelous3/multio | multio/_event_loop_wrappers.py | trio_open_connection | async def trio_open_connection(host, port, *, ssl=False, **kwargs):
'''
Allows connections to be made that may or may not require ssl.
Somewhat surprisingly trio doesn't have an abstraction for this like
curio even though it's fairly trivial to write. Down the line hopefully.
Args:
host (str): Network location, either by domain or IP.
port (int): The requested port.
ssl (bool or SSLContext): If False or None, SSL is not required. If
True, the context returned by trio.ssl.create_default_context will
be used. Otherwise, this may be an SSLContext object.
kwargs: A catch all to soak up curio's additional kwargs and
ignore them.
'''
import trio
if not ssl:
sock = await trio.open_tcp_stream(host, port)
else:
if isinstance(ssl, bool):
ssl_context = None
else:
ssl_context = ssl
sock = await trio.open_ssl_over_tcp_stream(host, port, ssl_context=ssl_context)
await sock.do_handshake()
sock.close = sock.aclose
return sock | python | async def trio_open_connection(host, port, *, ssl=False, **kwargs):
'''
Allows connections to be made that may or may not require ssl.
Somewhat surprisingly trio doesn't have an abstraction for this like
curio even though it's fairly trivial to write. Down the line hopefully.
Args:
host (str): Network location, either by domain or IP.
port (int): The requested port.
ssl (bool or SSLContext): If False or None, SSL is not required. If
True, the context returned by trio.ssl.create_default_context will
be used. Otherwise, this may be an SSLContext object.
kwargs: A catch all to soak up curio's additional kwargs and
ignore them.
'''
import trio
if not ssl:
sock = await trio.open_tcp_stream(host, port)
else:
if isinstance(ssl, bool):
ssl_context = None
else:
ssl_context = ssl
sock = await trio.open_ssl_over_tcp_stream(host, port, ssl_context=ssl_context)
await sock.do_handshake()
sock.close = sock.aclose
return sock | [
"async",
"def",
"trio_open_connection",
"(",
"host",
",",
"port",
",",
"*",
",",
"ssl",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"trio",
"if",
"not",
"ssl",
":",
"sock",
"=",
"await",
"trio",
".",
"open_tcp_stream",
"(",
"host",
","... | Allows connections to be made that may or may not require ssl.
Somewhat surprisingly trio doesn't have an abstraction for this like
curio even though it's fairly trivial to write. Down the line hopefully.
Args:
host (str): Network location, either by domain or IP.
port (int): The requested port.
ssl (bool or SSLContext): If False or None, SSL is not required. If
True, the context returned by trio.ssl.create_default_context will
be used. Otherwise, this may be an SSLContext object.
kwargs: A catch all to soak up curio's additional kwargs and
ignore them. | [
"Allows",
"connections",
"to",
"be",
"made",
"that",
"may",
"or",
"may",
"not",
"require",
"ssl",
".",
"Somewhat",
"surprisingly",
"trio",
"doesn",
"t",
"have",
"an",
"abstraction",
"for",
"this",
"like",
"curio",
"even",
"though",
"it",
"s",
"fairly",
"tr... | 018e4a9f78d5f4e78608a1a1537000b5fd778bbe | https://github.com/theelous3/multio/blob/018e4a9f78d5f4e78608a1a1537000b5fd778bbe/multio/_event_loop_wrappers.py#L12-L39 | train | 22,803 |
Fizzadar/pyinfra | pyinfra/modules/puppet.py | agent | def agent(state, host, server=None, port=None):
"""
Run puppet agent
+ server: master server URL
+ port: puppet master port
"""
args = []
if server:
args.append('--server=%s' % server)
if port:
args.append('--masterport=%s' % port)
yield 'puppet agent -t %s' % ' '.join(args) | python | def agent(state, host, server=None, port=None):
"""
Run puppet agent
+ server: master server URL
+ port: puppet master port
"""
args = []
if server:
args.append('--server=%s' % server)
if port:
args.append('--masterport=%s' % port)
yield 'puppet agent -t %s' % ' '.join(args) | [
"def",
"agent",
"(",
"state",
",",
"host",
",",
"server",
"=",
"None",
",",
"port",
"=",
"None",
")",
":",
"args",
"=",
"[",
"]",
"if",
"server",
":",
"args",
".",
"append",
"(",
"'--server=%s'",
"%",
"server",
")",
"if",
"port",
":",
"args",
"."... | Run puppet agent
+ server: master server URL
+ port: puppet master port | [
"Run",
"puppet",
"agent"
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/puppet.py#L5-L20 | train | 22,804 |
Fizzadar/pyinfra | pyinfra_cli/config.py | load_config | def load_config(deploy_dir):
'''
Loads any local config.py file.
'''
config = Config()
config_filename = path.join(deploy_dir, 'config.py')
if path.exists(config_filename):
extract_file_config(config_filename, config)
# Now execute the file to trigger loading of any hooks
exec_file(config_filename)
return config | python | def load_config(deploy_dir):
'''
Loads any local config.py file.
'''
config = Config()
config_filename = path.join(deploy_dir, 'config.py')
if path.exists(config_filename):
extract_file_config(config_filename, config)
# Now execute the file to trigger loading of any hooks
exec_file(config_filename)
return config | [
"def",
"load_config",
"(",
"deploy_dir",
")",
":",
"config",
"=",
"Config",
"(",
")",
"config_filename",
"=",
"path",
".",
"join",
"(",
"deploy_dir",
",",
"'config.py'",
")",
"if",
"path",
".",
"exists",
"(",
"config_filename",
")",
":",
"extract_file_config... | Loads any local config.py file. | [
"Loads",
"any",
"local",
"config",
".",
"py",
"file",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra_cli/config.py#L67-L81 | train | 22,805 |
Fizzadar/pyinfra | pyinfra_cli/config.py | load_deploy_config | def load_deploy_config(deploy_filename, config=None):
'''
Loads any local config overrides in the deploy file.
'''
if not config:
config = Config()
if not deploy_filename:
return
if path.exists(deploy_filename):
extract_file_config(deploy_filename, config)
return config | python | def load_deploy_config(deploy_filename, config=None):
'''
Loads any local config overrides in the deploy file.
'''
if not config:
config = Config()
if not deploy_filename:
return
if path.exists(deploy_filename):
extract_file_config(deploy_filename, config)
return config | [
"def",
"load_deploy_config",
"(",
"deploy_filename",
",",
"config",
"=",
"None",
")",
":",
"if",
"not",
"config",
":",
"config",
"=",
"Config",
"(",
")",
"if",
"not",
"deploy_filename",
":",
"return",
"if",
"path",
".",
"exists",
"(",
"deploy_filename",
")... | Loads any local config overrides in the deploy file. | [
"Loads",
"any",
"local",
"config",
"overrides",
"in",
"the",
"deploy",
"file",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra_cli/config.py#L84-L98 | train | 22,806 |
Fizzadar/pyinfra | pyinfra/facts/iptables.py | parse_iptables_rule | def parse_iptables_rule(line):
'''
Parse one iptables rule. Returns a dict where each iptables code argument
is mapped to a name using IPTABLES_ARGS.
'''
bits = line.split()
definition = {}
key = None
args = []
not_arg = False
def add_args():
arg_string = ' '.join(args)
if key in IPTABLES_ARGS:
definition_key = (
'not_{0}'.format(IPTABLES_ARGS[key])
if not_arg
else IPTABLES_ARGS[key]
)
definition[definition_key] = arg_string
else:
definition.setdefault('extras', []).extend((key, arg_string))
for bit in bits:
if bit == '!':
if key:
add_args()
args = []
key = None
not_arg = True
elif bit.startswith('-'):
if key:
add_args()
args = []
not_arg = False
key = bit
else:
args.append(bit)
if key:
add_args()
if 'extras' in definition:
definition['extras'] = set(definition['extras'])
return definition | python | def parse_iptables_rule(line):
'''
Parse one iptables rule. Returns a dict where each iptables code argument
is mapped to a name using IPTABLES_ARGS.
'''
bits = line.split()
definition = {}
key = None
args = []
not_arg = False
def add_args():
arg_string = ' '.join(args)
if key in IPTABLES_ARGS:
definition_key = (
'not_{0}'.format(IPTABLES_ARGS[key])
if not_arg
else IPTABLES_ARGS[key]
)
definition[definition_key] = arg_string
else:
definition.setdefault('extras', []).extend((key, arg_string))
for bit in bits:
if bit == '!':
if key:
add_args()
args = []
key = None
not_arg = True
elif bit.startswith('-'):
if key:
add_args()
args = []
not_arg = False
key = bit
else:
args.append(bit)
if key:
add_args()
if 'extras' in definition:
definition['extras'] = set(definition['extras'])
return definition | [
"def",
"parse_iptables_rule",
"(",
"line",
")",
":",
"bits",
"=",
"line",
".",
"split",
"(",
")",
"definition",
"=",
"{",
"}",
"key",
"=",
"None",
"args",
"=",
"[",
"]",
"not_arg",
"=",
"False",
"def",
"add_args",
"(",
")",
":",
"arg_string",
"=",
... | Parse one iptables rule. Returns a dict where each iptables code argument
is mapped to a name using IPTABLES_ARGS. | [
"Parse",
"one",
"iptables",
"rule",
".",
"Returns",
"a",
"dict",
"where",
"each",
"iptables",
"code",
"argument",
"is",
"mapped",
"to",
"a",
"name",
"using",
"IPTABLES_ARGS",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/facts/iptables.py#L31-L84 | train | 22,807 |
Fizzadar/pyinfra | pyinfra/api/operation.py | add_op | def add_op(state, op_func, *args, **kwargs):
'''
Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
to op_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
'''
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
op_func(state, host, *args, **kwargs) | python | def add_op(state, op_func, *args, **kwargs):
'''
Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
to op_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
'''
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
op_func(state, host, *args, **kwargs) | [
"def",
"add_op",
"(",
"state",
",",
"op_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"frameinfo",
"=",
"get_caller_frameinfo",
"(",
")",
"kwargs",
"[",
"'frameinfo'",
"]",
"=",
"frameinfo",
"for",
"host",
"in",
"state",
".",
"inventory",
... | Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
to op_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function | [
"Prepare",
"&",
"add",
"an",
"operation",
"to",
"pyinfra",
".",
"state",
"by",
"executing",
"it",
"on",
"all",
"hosts",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operation.py#L59-L74 | train | 22,808 |
Fizzadar/pyinfra | pyinfra/api/deploy.py | add_deploy | def add_deploy(state, deploy_func, *args, **kwargs):
'''
Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
'''
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
deploy_func(state, host, *args, **kwargs) | python | def add_deploy(state, deploy_func, *args, **kwargs):
'''
Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
'''
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
deploy_func(state, host, *args, **kwargs) | [
"def",
"add_deploy",
"(",
"state",
",",
"deploy_func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"frameinfo",
"=",
"get_caller_frameinfo",
"(",
")",
"kwargs",
"[",
"'frameinfo'",
"]",
"=",
"frameinfo",
"for",
"host",
"in",
"state",
".",
"inven... | Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function | [
"Prepare",
"&",
"add",
"an",
"deploy",
"to",
"pyinfra",
".",
"state",
"by",
"executing",
"it",
"on",
"all",
"hosts",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/deploy.py#L24-L39 | train | 22,809 |
Fizzadar/pyinfra | pyinfra_cli/legacy.py | setup_arguments | def setup_arguments(arguments):
'''
Prepares argumnents output by docopt.
'''
# Ensure parallel/port are numbers
for key in ('--parallel', '--port', '--fail-percent'):
if arguments[key]:
try:
arguments[key] = int(arguments[key])
except ValueError:
raise CliError('{0} is not a valid integer for {1}'.format(
arguments[key], key,
))
# Prep --run OP ARGS
if arguments['--run']:
op, args = setup_op_and_args(arguments['--run'], arguments['ARGS'])
else:
op = args = None
# Check deploy file exists
if arguments['DEPLOY']:
if not path.exists(arguments['DEPLOY']):
raise CliError('Deploy file not found: {0}'.format(arguments['DEPLOY']))
# Check our key file exists
if arguments['--key']:
if not path.exists(arguments['--key']):
raise CliError('Private key file not found: {0}'.format(arguments['--key']))
# Setup the rest
return {
# Deploy options
'inventory': arguments['-i'],
'deploy': arguments['DEPLOY'],
'verbose': arguments['-v'],
'dry': arguments['--dry'],
'serial': arguments['--serial'],
'no_wait': arguments['--no-wait'],
'debug': arguments['--debug'],
'debug_data': arguments['--debug-data'],
'debug_state': arguments['--debug-state'],
'fact': arguments['--fact'],
'limit': arguments['--limit'],
'op': op,
'op_args': args,
# Config options
'user': arguments['--user'],
'key': arguments['--key'],
'key_password': arguments['--key-password'],
'password': arguments['--password'],
'port': arguments['--port'],
'sudo': arguments['--sudo'],
'sudo_user': arguments['--sudo-user'],
'su_user': arguments['--su-user'],
'parallel': arguments['--parallel'],
'fail_percent': arguments['--fail-percent'],
} | python | def setup_arguments(arguments):
'''
Prepares argumnents output by docopt.
'''
# Ensure parallel/port are numbers
for key in ('--parallel', '--port', '--fail-percent'):
if arguments[key]:
try:
arguments[key] = int(arguments[key])
except ValueError:
raise CliError('{0} is not a valid integer for {1}'.format(
arguments[key], key,
))
# Prep --run OP ARGS
if arguments['--run']:
op, args = setup_op_and_args(arguments['--run'], arguments['ARGS'])
else:
op = args = None
# Check deploy file exists
if arguments['DEPLOY']:
if not path.exists(arguments['DEPLOY']):
raise CliError('Deploy file not found: {0}'.format(arguments['DEPLOY']))
# Check our key file exists
if arguments['--key']:
if not path.exists(arguments['--key']):
raise CliError('Private key file not found: {0}'.format(arguments['--key']))
# Setup the rest
return {
# Deploy options
'inventory': arguments['-i'],
'deploy': arguments['DEPLOY'],
'verbose': arguments['-v'],
'dry': arguments['--dry'],
'serial': arguments['--serial'],
'no_wait': arguments['--no-wait'],
'debug': arguments['--debug'],
'debug_data': arguments['--debug-data'],
'debug_state': arguments['--debug-state'],
'fact': arguments['--fact'],
'limit': arguments['--limit'],
'op': op,
'op_args': args,
# Config options
'user': arguments['--user'],
'key': arguments['--key'],
'key_password': arguments['--key-password'],
'password': arguments['--password'],
'port': arguments['--port'],
'sudo': arguments['--sudo'],
'sudo_user': arguments['--sudo-user'],
'su_user': arguments['--su-user'],
'parallel': arguments['--parallel'],
'fail_percent': arguments['--fail-percent'],
} | [
"def",
"setup_arguments",
"(",
"arguments",
")",
":",
"# Ensure parallel/port are numbers",
"for",
"key",
"in",
"(",
"'--parallel'",
",",
"'--port'",
",",
"'--fail-percent'",
")",
":",
"if",
"arguments",
"[",
"key",
"]",
":",
"try",
":",
"arguments",
"[",
"key... | Prepares argumnents output by docopt. | [
"Prepares",
"argumnents",
"output",
"by",
"docopt",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra_cli/legacy.py#L210-L272 | train | 22,810 |
Fizzadar/pyinfra | pyinfra/modules/mysql.py | sql | def sql(
state, host, sql,
database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Execute arbitrary SQL against MySQL.
+ sql: SQL command(s) to execute
+ database: optional database to open the connection with
+ mysql_*: global module arguments, see above
'''
yield make_execute_mysql_command(
sql,
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
) | python | def sql(
state, host, sql,
database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Execute arbitrary SQL against MySQL.
+ sql: SQL command(s) to execute
+ database: optional database to open the connection with
+ mysql_*: global module arguments, see above
'''
yield make_execute_mysql_command(
sql,
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
) | [
"def",
"sql",
"(",
"state",
",",
"host",
",",
"sql",
",",
"database",
"=",
"None",
",",
"# Details for speaking to MySQL via `mysql` CLI",
"mysql_user",
"=",
"None",
",",
"mysql_password",
"=",
"None",
",",
"mysql_host",
"=",
"None",
",",
"mysql_port",
"=",
"N... | Execute arbitrary SQL against MySQL.
+ sql: SQL command(s) to execute
+ database: optional database to open the connection with
+ mysql_*: global module arguments, see above | [
"Execute",
"arbitrary",
"SQL",
"against",
"MySQL",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/mysql.py#L24-L46 | train | 22,811 |
Fizzadar/pyinfra | pyinfra/modules/mysql.py | dump | def dump(
state, host,
remote_filename, database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_mysql_command(
executable='mysqldump',
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
), remote_filename) | python | def dump(
state, host,
remote_filename, database=None,
# Details for speaking to MySQL via `mysql` CLI
mysql_user=None, mysql_password=None,
mysql_host=None, mysql_port=None,
):
'''
Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_mysql_command(
executable='mysqldump',
database=database,
user=mysql_user,
password=mysql_password,
host=mysql_host,
port=mysql_port,
), remote_filename) | [
"def",
"dump",
"(",
"state",
",",
"host",
",",
"remote_filename",
",",
"database",
"=",
"None",
",",
"# Details for speaking to MySQL via `mysql` CLI",
"mysql_user",
"=",
"None",
",",
"mysql_password",
"=",
"None",
",",
"mysql_host",
"=",
"None",
",",
"mysql_port"... | Dump a MySQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ mysql_*: global module arguments, see above | [
"Dump",
"a",
"MySQL",
"database",
"into",
"a",
".",
"sql",
"file",
".",
"Requires",
"mysqldump",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/mysql.py#L306-L328 | train | 22,812 |
Fizzadar/pyinfra | pyinfra/api/inventory.py | Inventory.get_host | def get_host(self, name, default=NoHostError):
'''
Get a single host by name.
'''
if name in self.hosts:
return self.hosts[name]
if default is NoHostError:
raise NoHostError('No such host: {0}'.format(name))
return default | python | def get_host(self, name, default=NoHostError):
'''
Get a single host by name.
'''
if name in self.hosts:
return self.hosts[name]
if default is NoHostError:
raise NoHostError('No such host: {0}'.format(name))
return default | [
"def",
"get_host",
"(",
"self",
",",
"name",
",",
"default",
"=",
"NoHostError",
")",
":",
"if",
"name",
"in",
"self",
".",
"hosts",
":",
"return",
"self",
".",
"hosts",
"[",
"name",
"]",
"if",
"default",
"is",
"NoHostError",
":",
"raise",
"NoHostError... | Get a single host by name. | [
"Get",
"a",
"single",
"host",
"by",
"name",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/inventory.py#L258-L269 | train | 22,813 |
Fizzadar/pyinfra | pyinfra/api/inventory.py | Inventory.get_group | def get_group(self, name, default=NoGroupError):
'''
Get a list of hosts belonging to a group.
'''
if name in self.groups:
return self.groups[name]
if default is NoGroupError:
raise NoGroupError('No such group: {0}'.format(name))
return default | python | def get_group(self, name, default=NoGroupError):
'''
Get a list of hosts belonging to a group.
'''
if name in self.groups:
return self.groups[name]
if default is NoGroupError:
raise NoGroupError('No such group: {0}'.format(name))
return default | [
"def",
"get_group",
"(",
"self",
",",
"name",
",",
"default",
"=",
"NoGroupError",
")",
":",
"if",
"name",
"in",
"self",
".",
"groups",
":",
"return",
"self",
".",
"groups",
"[",
"name",
"]",
"if",
"default",
"is",
"NoGroupError",
":",
"raise",
"NoGrou... | Get a list of hosts belonging to a group. | [
"Get",
"a",
"list",
"of",
"hosts",
"belonging",
"to",
"a",
"group",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/inventory.py#L271-L282 | train | 22,814 |
Fizzadar/pyinfra | pyinfra/api/inventory.py | Inventory.get_groups_data | def get_groups_data(self, groups):
'''
Gets aggregated data from a list of groups. Vars are collected in order so, for
any groups which define the same var twice, the last group's value will hold.
'''
data = {}
for group in groups:
data.update(self.get_group_data(group))
return data | python | def get_groups_data(self, groups):
'''
Gets aggregated data from a list of groups. Vars are collected in order so, for
any groups which define the same var twice, the last group's value will hold.
'''
data = {}
for group in groups:
data.update(self.get_group_data(group))
return data | [
"def",
"get_groups_data",
"(",
"self",
",",
"groups",
")",
":",
"data",
"=",
"{",
"}",
"for",
"group",
"in",
"groups",
":",
"data",
".",
"update",
"(",
"self",
".",
"get_group_data",
"(",
"group",
")",
")",
"return",
"data"
] | Gets aggregated data from a list of groups. Vars are collected in order so, for
any groups which define the same var twice, the last group's value will hold. | [
"Gets",
"aggregated",
"data",
"from",
"a",
"list",
"of",
"groups",
".",
"Vars",
"are",
"collected",
"in",
"order",
"so",
"for",
"any",
"groups",
"which",
"define",
"the",
"same",
"var",
"twice",
"the",
"last",
"group",
"s",
"value",
"will",
"hold",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/inventory.py#L312-L323 | train | 22,815 |
Fizzadar/pyinfra | pyinfra/api/inventory.py | Inventory.get_deploy_data | def get_deploy_data(self):
'''
Gets any default data attached to the current deploy, if any.
'''
if self.state and self.state.deploy_data:
return self.state.deploy_data
return {} | python | def get_deploy_data(self):
'''
Gets any default data attached to the current deploy, if any.
'''
if self.state and self.state.deploy_data:
return self.state.deploy_data
return {} | [
"def",
"get_deploy_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"state",
"and",
"self",
".",
"state",
".",
"deploy_data",
":",
"return",
"self",
".",
"state",
".",
"deploy_data",
"return",
"{",
"}"
] | Gets any default data attached to the current deploy, if any. | [
"Gets",
"any",
"default",
"data",
"attached",
"to",
"the",
"current",
"deploy",
"if",
"any",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/inventory.py#L325-L333 | train | 22,816 |
Fizzadar/pyinfra | pyinfra/modules/git.py | config | def config(
state, host, key, value,
repo=None,
):
'''
Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
'''
existing_config = host.fact.git_config(repo)
if key not in existing_config or existing_config[key] != value:
if repo is None:
yield 'git config --global {0} "{1}"'.format(key, value)
else:
yield 'cd {0} && git config --local {1} "{2}"'.format(repo, key, value) | python | def config(
state, host, key, value,
repo=None,
):
'''
Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global)
'''
existing_config = host.fact.git_config(repo)
if key not in existing_config or existing_config[key] != value:
if repo is None:
yield 'git config --global {0} "{1}"'.format(key, value)
else:
yield 'cd {0} && git config --local {1} "{2}"'.format(repo, key, value) | [
"def",
"config",
"(",
"state",
",",
"host",
",",
"key",
",",
"value",
",",
"repo",
"=",
"None",
",",
")",
":",
"existing_config",
"=",
"host",
".",
"fact",
".",
"git_config",
"(",
"repo",
")",
"if",
"key",
"not",
"in",
"existing_config",
"or",
"exist... | Manage git config for a repository or globally.
+ key: the key of the config to ensure
+ value: the value this key should have
+ repo: specify the git repo path to edit local config (defaults to global) | [
"Manage",
"git",
"config",
"for",
"a",
"repository",
"or",
"globally",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/git.py#L23-L41 | train | 22,817 |
Fizzadar/pyinfra | pyinfra/local.py | include | def include(filename, hosts=False, when=True):
'''
Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``
directory.
Args:
hosts (string, list): group name or list of hosts to limit this include to
when (bool): indicate whether to trigger operations in this include
'''
if not pyinfra.is_cli:
raise PyinfraError('local.include is only available in CLI mode.')
if not when:
return
if hosts is not False:
hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)
if pseudo_host not in hosts:
return
if pseudo_state.deploy_dir:
filename = path.join(pseudo_state.deploy_dir, filename)
frameinfo = get_caller_frameinfo()
logger.debug('Including local file: {0}'.format(filename))
try:
# Fixes a circular import because `pyinfra.local` is really a CLI
# only thing (so should be `pyinfra_cli.local`). It is kept here
# to maintain backwards compatability and the nicer public import
# (ideally users never need to import from `pyinfra_cli`).
from pyinfra_cli.config import extract_file_config
from pyinfra_cli.util import exec_file
# Load any config defined in the file and setup like a @deploy
config_data = extract_file_config(filename)
kwargs = {
key.lower(): value
for key, value in six.iteritems(config_data)
if key in [
'SUDO', 'SUDO_USER', 'SU_USER',
'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS',
]
}
with pseudo_state.deploy(
filename, kwargs, None, frameinfo.lineno,
in_deploy=False,
):
exec_file(filename)
# One potential solution to the above is to add local as an actual
# module, ie `pyinfra.modules.local`.
except IOError as e:
raise PyinfraError(
'Could not include local file: {0}\n{1}'.format(filename, e),
) | python | def include(filename, hosts=False, when=True):
'''
Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``
directory.
Args:
hosts (string, list): group name or list of hosts to limit this include to
when (bool): indicate whether to trigger operations in this include
'''
if not pyinfra.is_cli:
raise PyinfraError('local.include is only available in CLI mode.')
if not when:
return
if hosts is not False:
hosts = ensure_host_list(hosts, inventory=pseudo_state.inventory)
if pseudo_host not in hosts:
return
if pseudo_state.deploy_dir:
filename = path.join(pseudo_state.deploy_dir, filename)
frameinfo = get_caller_frameinfo()
logger.debug('Including local file: {0}'.format(filename))
try:
# Fixes a circular import because `pyinfra.local` is really a CLI
# only thing (so should be `pyinfra_cli.local`). It is kept here
# to maintain backwards compatability and the nicer public import
# (ideally users never need to import from `pyinfra_cli`).
from pyinfra_cli.config import extract_file_config
from pyinfra_cli.util import exec_file
# Load any config defined in the file and setup like a @deploy
config_data = extract_file_config(filename)
kwargs = {
key.lower(): value
for key, value in six.iteritems(config_data)
if key in [
'SUDO', 'SUDO_USER', 'SU_USER',
'PRESERVE_SUDO_ENV', 'IGNORE_ERRORS',
]
}
with pseudo_state.deploy(
filename, kwargs, None, frameinfo.lineno,
in_deploy=False,
):
exec_file(filename)
# One potential solution to the above is to add local as an actual
# module, ie `pyinfra.modules.local`.
except IOError as e:
raise PyinfraError(
'Could not include local file: {0}\n{1}'.format(filename, e),
) | [
"def",
"include",
"(",
"filename",
",",
"hosts",
"=",
"False",
",",
"when",
"=",
"True",
")",
":",
"if",
"not",
"pyinfra",
".",
"is_cli",
":",
"raise",
"PyinfraError",
"(",
"'local.include is only available in CLI mode.'",
")",
"if",
"not",
"when",
":",
"ret... | Executes a local python file within the ``pyinfra.pseudo_state.deploy_dir``
directory.
Args:
hosts (string, list): group name or list of hosts to limit this include to
when (bool): indicate whether to trigger operations in this include | [
"Executes",
"a",
"local",
"python",
"file",
"within",
"the",
"pyinfra",
".",
"pseudo_state",
".",
"deploy_dir",
"directory",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/local.py#L19-L78 | train | 22,818 |
jdowner/gist | gist/gist.py | GistAPI.send | def send(self, request, stem=None):
"""Prepare and send a request
Arguments:
request: a Request object that is not yet prepared
stem: a path to append to the root URL
Returns:
The response to the request
"""
if stem is not None:
request.url = request.url + "/" + stem.lstrip("/")
prepped = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(url=prepped.url,
proxies={},
stream=None,
verify=None,
cert=None)
return self.session.send(prepped, **settings) | python | def send(self, request, stem=None):
"""Prepare and send a request
Arguments:
request: a Request object that is not yet prepared
stem: a path to append to the root URL
Returns:
The response to the request
"""
if stem is not None:
request.url = request.url + "/" + stem.lstrip("/")
prepped = self.session.prepare_request(request)
settings = self.session.merge_environment_settings(url=prepped.url,
proxies={},
stream=None,
verify=None,
cert=None)
return self.session.send(prepped, **settings) | [
"def",
"send",
"(",
"self",
",",
"request",
",",
"stem",
"=",
"None",
")",
":",
"if",
"stem",
"is",
"not",
"None",
":",
"request",
".",
"url",
"=",
"request",
".",
"url",
"+",
"\"/\"",
"+",
"stem",
".",
"lstrip",
"(",
"\"/\"",
")",
"prepped",
"="... | Prepare and send a request
Arguments:
request: a Request object that is not yet prepared
stem: a path to append to the root URL
Returns:
The response to the request | [
"Prepare",
"and",
"send",
"a",
"request"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L143-L164 | train | 22,819 |
jdowner/gist | gist/gist.py | GistAPI.list | def list(self):
"""Returns a list of the users gists as GistInfo objects
Returns:
a list of GistInfo objects
"""
# Define the basic request. The per_page parameter is set to 100, which
# is the maximum github allows. If the user has more than one page of
# gists, this request object will be modified to retrieve each
# successive page of gists.
request = requests.Request(
'GET',
'https://api.github.com/gists',
headers={
'Accept-Encoding': 'identity, deflate, compress, gzip',
'User-Agent': 'python-requests/1.2.0',
'Accept': 'application/vnd.github.v3.base64',
},
params={
'access_token': self.token,
'per_page': 100,
},
)
# Github provides a 'link' header that contains information to
# navigate through a users page of gists. This regex is used to
# extract the URLs contained in this header, and to find the next page
# of gists.
pattern = re.compile(r'<([^>]*)>; rel="([^"]*)"')
gists = []
while True:
# Retrieve the next page of gists
try:
response = self.send(request).json()
except Exception:
break
# Extract the list of gists
for gist in response:
try:
gists.append(
GistInfo(
gist['id'],
gist['public'],
gist['description'],
)
)
except KeyError:
continue
try:
link = response.headers['link']
# Search for the next page of gist. If a 'next' page is found,
# the URL is set to this new page and the iteration continues.
# If there is no next page, return the list of gists.
for result in pattern.finditer(link):
url = result.group(1)
rel = result.group(2)
if rel == 'next':
request.url = url
break
else:
return gists
except Exception:
break
return gists | python | def list(self):
"""Returns a list of the users gists as GistInfo objects
Returns:
a list of GistInfo objects
"""
# Define the basic request. The per_page parameter is set to 100, which
# is the maximum github allows. If the user has more than one page of
# gists, this request object will be modified to retrieve each
# successive page of gists.
request = requests.Request(
'GET',
'https://api.github.com/gists',
headers={
'Accept-Encoding': 'identity, deflate, compress, gzip',
'User-Agent': 'python-requests/1.2.0',
'Accept': 'application/vnd.github.v3.base64',
},
params={
'access_token': self.token,
'per_page': 100,
},
)
# Github provides a 'link' header that contains information to
# navigate through a users page of gists. This regex is used to
# extract the URLs contained in this header, and to find the next page
# of gists.
pattern = re.compile(r'<([^>]*)>; rel="([^"]*)"')
gists = []
while True:
# Retrieve the next page of gists
try:
response = self.send(request).json()
except Exception:
break
# Extract the list of gists
for gist in response:
try:
gists.append(
GistInfo(
gist['id'],
gist['public'],
gist['description'],
)
)
except KeyError:
continue
try:
link = response.headers['link']
# Search for the next page of gist. If a 'next' page is found,
# the URL is set to this new page and the iteration continues.
# If there is no next page, return the list of gists.
for result in pattern.finditer(link):
url = result.group(1)
rel = result.group(2)
if rel == 'next':
request.url = url
break
else:
return gists
except Exception:
break
return gists | [
"def",
"list",
"(",
"self",
")",
":",
"# Define the basic request. The per_page parameter is set to 100, which",
"# is the maximum github allows. If the user has more than one page of",
"# gists, this request object will be modified to retrieve each",
"# successive page of gists.",
"request",
... | Returns a list of the users gists as GistInfo objects
Returns:
a list of GistInfo objects | [
"Returns",
"a",
"list",
"of",
"the",
"users",
"gists",
"as",
"GistInfo",
"objects"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L166-L239 | train | 22,820 |
jdowner/gist | gist/gist.py | GistAPI.create | def create(self, request, desc, files, public=False):
"""Creates a gist
Arguments:
request: an initial request object
desc: the gist description
files: a list of files to add to the gist
public: a flag to indicate whether the gist is public or not
Returns:
The URL to the newly created gist.
"""
request.data = json.dumps({
"description": desc,
"public": public,
"files": files,
})
return self.send(request).json()['html_url'] | python | def create(self, request, desc, files, public=False):
"""Creates a gist
Arguments:
request: an initial request object
desc: the gist description
files: a list of files to add to the gist
public: a flag to indicate whether the gist is public or not
Returns:
The URL to the newly created gist.
"""
request.data = json.dumps({
"description": desc,
"public": public,
"files": files,
})
return self.send(request).json()['html_url'] | [
"def",
"create",
"(",
"self",
",",
"request",
",",
"desc",
",",
"files",
",",
"public",
"=",
"False",
")",
":",
"request",
".",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"description\"",
":",
"desc",
",",
"\"public\"",
":",
"public",
",",
"\"fil... | Creates a gist
Arguments:
request: an initial request object
desc: the gist description
files: a list of files to add to the gist
public: a flag to indicate whether the gist is public or not
Returns:
The URL to the newly created gist. | [
"Creates",
"a",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L242-L260 | train | 22,821 |
jdowner/gist | gist/gist.py | GistAPI.files | def files(self, request, id):
"""Returns a list of files in the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A list of the files
"""
gist = self.send(request, id).json()
return gist['files'] | python | def files(self, request, id):
"""Returns a list of files in the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A list of the files
"""
gist = self.send(request, id).json()
return gist['files'] | [
"def",
"files",
"(",
"self",
",",
"request",
",",
"id",
")",
":",
"gist",
"=",
"self",
".",
"send",
"(",
"request",
",",
"id",
")",
".",
"json",
"(",
")",
"return",
"gist",
"[",
"'files'",
"]"
] | Returns a list of files in the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A list of the files | [
"Returns",
"a",
"list",
"of",
"files",
"in",
"the",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L288-L300 | train | 22,822 |
jdowner/gist | gist/gist.py | GistAPI.content | def content(self, request, id):
"""Returns the content of the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A dict containing the contents of each file in the gist
"""
gist = self.send(request, id).json()
def convert(data):
return base64.b64decode(data).decode('utf-8')
content = {}
for name, data in gist['files'].items():
content[name] = convert(data['content'])
return content | python | def content(self, request, id):
"""Returns the content of the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A dict containing the contents of each file in the gist
"""
gist = self.send(request, id).json()
def convert(data):
return base64.b64decode(data).decode('utf-8')
content = {}
for name, data in gist['files'].items():
content[name] = convert(data['content'])
return content | [
"def",
"content",
"(",
"self",
",",
"request",
",",
"id",
")",
":",
"gist",
"=",
"self",
".",
"send",
"(",
"request",
",",
"id",
")",
".",
"json",
"(",
")",
"def",
"convert",
"(",
"data",
")",
":",
"return",
"base64",
".",
"b64decode",
"(",
"data... | Returns the content of the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A dict containing the contents of each file in the gist | [
"Returns",
"the",
"content",
"of",
"the",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L303-L323 | train | 22,823 |
jdowner/gist | gist/gist.py | GistAPI.archive | def archive(self, request, id):
"""Create an archive of a gist
The files in the gist are downloaded and added to a compressed archive
(tarball). If the ID of the gist was c78d925546e964b4b1df, the
resulting archive would be,
c78d925546e964b4b1df.tar.gz
The archive is created in the directory where the command is invoked.
Arguments:
request: an initial request object
id: the gist identifier
"""
gist = self.send(request, id).json()
with tarfile.open('{}.tar.gz'.format(id), mode='w:gz') as archive:
for name, data in gist['files'].items():
with tempfile.NamedTemporaryFile('w+') as fp:
fp.write(data['content'])
fp.flush()
archive.add(fp.name, arcname=name) | python | def archive(self, request, id):
"""Create an archive of a gist
The files in the gist are downloaded and added to a compressed archive
(tarball). If the ID of the gist was c78d925546e964b4b1df, the
resulting archive would be,
c78d925546e964b4b1df.tar.gz
The archive is created in the directory where the command is invoked.
Arguments:
request: an initial request object
id: the gist identifier
"""
gist = self.send(request, id).json()
with tarfile.open('{}.tar.gz'.format(id), mode='w:gz') as archive:
for name, data in gist['files'].items():
with tempfile.NamedTemporaryFile('w+') as fp:
fp.write(data['content'])
fp.flush()
archive.add(fp.name, arcname=name) | [
"def",
"archive",
"(",
"self",
",",
"request",
",",
"id",
")",
":",
"gist",
"=",
"self",
".",
"send",
"(",
"request",
",",
"id",
")",
".",
"json",
"(",
")",
"with",
"tarfile",
".",
"open",
"(",
"'{}.tar.gz'",
".",
"format",
"(",
"id",
")",
",",
... | Create an archive of a gist
The files in the gist are downloaded and added to a compressed archive
(tarball). If the ID of the gist was c78d925546e964b4b1df, the
resulting archive would be,
c78d925546e964b4b1df.tar.gz
The archive is created in the directory where the command is invoked.
Arguments:
request: an initial request object
id: the gist identifier | [
"Create",
"an",
"archive",
"of",
"a",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L326-L349 | train | 22,824 |
jdowner/gist | gist/gist.py | GistAPI.edit | def edit(self, request, id):
"""Edit a gist
The files in the gist a cloned to a temporary directory and passed to
the default editor (defined by the EDITOR environmental variable). When
the user exits the editor, they will be provided with a prompt to
commit the changes, which will then be pushed to the remote.
Arguments:
request: an initial request object
id: the gist identifier
"""
with pushd(tempfile.gettempdir()):
try:
self.clone(id)
with pushd(id):
files = [f for f in os.listdir('.') if os.path.isfile(f)]
quoted = ['"{}"'.format(f) for f in files]
os.system("{} {}".format(self.editor, ' '.join(quoted)))
os.system('git commit -av && git push')
finally:
shutil.rmtree(id) | python | def edit(self, request, id):
"""Edit a gist
The files in the gist a cloned to a temporary directory and passed to
the default editor (defined by the EDITOR environmental variable). When
the user exits the editor, they will be provided with a prompt to
commit the changes, which will then be pushed to the remote.
Arguments:
request: an initial request object
id: the gist identifier
"""
with pushd(tempfile.gettempdir()):
try:
self.clone(id)
with pushd(id):
files = [f for f in os.listdir('.') if os.path.isfile(f)]
quoted = ['"{}"'.format(f) for f in files]
os.system("{} {}".format(self.editor, ' '.join(quoted)))
os.system('git commit -av && git push')
finally:
shutil.rmtree(id) | [
"def",
"edit",
"(",
"self",
",",
"request",
",",
"id",
")",
":",
"with",
"pushd",
"(",
"tempfile",
".",
"gettempdir",
"(",
")",
")",
":",
"try",
":",
"self",
".",
"clone",
"(",
"id",
")",
"with",
"pushd",
"(",
"id",
")",
":",
"files",
"=",
"[",... | Edit a gist
The files in the gist a cloned to a temporary directory and passed to
the default editor (defined by the EDITOR environmental variable). When
the user exits the editor, they will be provided with a prompt to
commit the changes, which will then be pushed to the remote.
Arguments:
request: an initial request object
id: the gist identifier | [
"Edit",
"a",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L352-L375 | train | 22,825 |
jdowner/gist | gist/gist.py | GistAPI.description | def description(self, request, id, description):
"""Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description
"""
request.data = json.dumps({
"description": description
})
return self.send(request, id).json()['html_url'] | python | def description(self, request, id, description):
"""Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description
"""
request.data = json.dumps({
"description": description
})
return self.send(request, id).json()['html_url'] | [
"def",
"description",
"(",
"self",
",",
"request",
",",
"id",
",",
"description",
")",
":",
"request",
".",
"data",
"=",
"json",
".",
"dumps",
"(",
"{",
"\"description\"",
":",
"description",
"}",
")",
"return",
"self",
".",
"send",
"(",
"request",
","... | Updates the description of a gist
Arguments:
request: an initial request object
id: the id of the gist we want to edit the description for
description: the new description | [
"Updates",
"the",
"description",
"of",
"a",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L391-L403 | train | 22,826 |
jdowner/gist | gist/gist.py | GistAPI.clone | def clone(self, id, name=None):
"""Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo
"""
url = 'git@gist.github.com:/{}'.format(id)
if name is None:
os.system('git clone {}'.format(url))
else:
os.system('git clone {} {}'.format(url, name)) | python | def clone(self, id, name=None):
"""Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo
"""
url = 'git@gist.github.com:/{}'.format(id)
if name is None:
os.system('git clone {}'.format(url))
else:
os.system('git clone {} {}'.format(url, name)) | [
"def",
"clone",
"(",
"self",
",",
"id",
",",
"name",
"=",
"None",
")",
":",
"url",
"=",
"'git@gist.github.com:/{}'",
".",
"format",
"(",
"id",
")",
"if",
"name",
"is",
"None",
":",
"os",
".",
"system",
"(",
"'git clone {}'",
".",
"format",
"(",
"url"... | Clone a gist
Arguments:
id: the gist identifier
name: the name to give the cloned repo | [
"Clone",
"a",
"gist"
] | 0f2941434f63c5aed69218edad454de8c73819a0 | https://github.com/jdowner/gist/blob/0f2941434f63c5aed69218edad454de8c73819a0/gist/gist.py#L405-L418 | train | 22,827 |
Fizzadar/pyinfra | pyinfra/modules/ssh.py | command | def command(state, host, hostname, command, ssh_user=None):
'''
Execute commands on other servers over SSH.
+ hostname: the hostname to connect to
+ command: the command to execute
+ ssh_user: connect with this user
'''
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
yield 'ssh {0} "{1}"'.format(connection_target, command) | python | def command(state, host, hostname, command, ssh_user=None):
'''
Execute commands on other servers over SSH.
+ hostname: the hostname to connect to
+ command: the command to execute
+ ssh_user: connect with this user
'''
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
yield 'ssh {0} "{1}"'.format(connection_target, command) | [
"def",
"command",
"(",
"state",
",",
"host",
",",
"hostname",
",",
"command",
",",
"ssh_user",
"=",
"None",
")",
":",
"connection_target",
"=",
"hostname",
"if",
"ssh_user",
":",
"connection_target",
"=",
"'@'",
".",
"join",
"(",
"(",
"ssh_user",
",",
"h... | Execute commands on other servers over SSH.
+ hostname: the hostname to connect to
+ command: the command to execute
+ ssh_user: connect with this user | [
"Execute",
"commands",
"on",
"other",
"servers",
"over",
"SSH",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/ssh.py#L47-L60 | train | 22,828 |
Fizzadar/pyinfra | pyinfra/modules/ssh.py | upload | def upload(
state, host, hostname, filename,
remote_filename=None, use_remote_sudo=False,
ssh_keyscan=False, ssh_user=None,
):
'''
Upload files to other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to upload
+ remote_filename: where to upload the file to (defaults to ``filename``)
+ use_remote_sudo: upload to a temporary location and move using sudo
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
'''
remote_filename = remote_filename or filename
# Figure out where we're connecting (host or user@host)
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
if ssh_keyscan:
yield keyscan(state, host, hostname)
# If we're not using sudo on the remote side, just scp the file over
if not use_remote_sudo:
yield 'scp {0} {1}:{2}'.format(filename, connection_target, remote_filename)
else:
# Otherwise - we need a temporary location for the file
temp_remote_filename = state.get_temp_filename()
# scp it to the temporary location
upload_cmd = 'scp {0} {1}:{2}'.format(
filename, connection_target, temp_remote_filename,
)
yield upload_cmd
# And sudo sudo to move it
yield command(state, host, connection_target, 'sudo mv {0} {1}'.format(
temp_remote_filename, remote_filename,
)) | python | def upload(
state, host, hostname, filename,
remote_filename=None, use_remote_sudo=False,
ssh_keyscan=False, ssh_user=None,
):
'''
Upload files to other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to upload
+ remote_filename: where to upload the file to (defaults to ``filename``)
+ use_remote_sudo: upload to a temporary location and move using sudo
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
'''
remote_filename = remote_filename or filename
# Figure out where we're connecting (host or user@host)
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
if ssh_keyscan:
yield keyscan(state, host, hostname)
# If we're not using sudo on the remote side, just scp the file over
if not use_remote_sudo:
yield 'scp {0} {1}:{2}'.format(filename, connection_target, remote_filename)
else:
# Otherwise - we need a temporary location for the file
temp_remote_filename = state.get_temp_filename()
# scp it to the temporary location
upload_cmd = 'scp {0} {1}:{2}'.format(
filename, connection_target, temp_remote_filename,
)
yield upload_cmd
# And sudo sudo to move it
yield command(state, host, connection_target, 'sudo mv {0} {1}'.format(
temp_remote_filename, remote_filename,
)) | [
"def",
"upload",
"(",
"state",
",",
"host",
",",
"hostname",
",",
"filename",
",",
"remote_filename",
"=",
"None",
",",
"use_remote_sudo",
"=",
"False",
",",
"ssh_keyscan",
"=",
"False",
",",
"ssh_user",
"=",
"None",
",",
")",
":",
"remote_filename",
"=",
... | Upload files to other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to upload
+ remote_filename: where to upload the file to (defaults to ``filename``)
+ use_remote_sudo: upload to a temporary location and move using sudo
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user | [
"Upload",
"files",
"to",
"other",
"servers",
"using",
"scp",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/ssh.py#L64-L108 | train | 22,829 |
Fizzadar/pyinfra | pyinfra/modules/ssh.py | download | def download(
state, host, hostname, filename,
local_filename=None, force=False,
ssh_keyscan=False, ssh_user=None,
):
'''
Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
'''
local_filename = local_filename or filename
# Get local file info
local_file_info = host.fact.file(local_filename)
# Local file exists but isn't a file?
if local_file_info is False:
raise OperationError(
'Local destination {0} already exists and is not a file'.format(
local_filename,
),
)
# If the local file exists and we're not forcing a re-download, no-op
if local_file_info and not force:
return
# Figure out where we're connecting (host or user@host)
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
if ssh_keyscan:
yield keyscan(state, host, hostname)
# Download the file with scp
yield 'scp {0}:{1} {2}'.format(connection_target, filename, local_filename) | python | def download(
state, host, hostname, filename,
local_filename=None, force=False,
ssh_keyscan=False, ssh_user=None,
):
'''
Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user
'''
local_filename = local_filename or filename
# Get local file info
local_file_info = host.fact.file(local_filename)
# Local file exists but isn't a file?
if local_file_info is False:
raise OperationError(
'Local destination {0} already exists and is not a file'.format(
local_filename,
),
)
# If the local file exists and we're not forcing a re-download, no-op
if local_file_info and not force:
return
# Figure out where we're connecting (host or user@host)
connection_target = hostname
if ssh_user:
connection_target = '@'.join((ssh_user, hostname))
if ssh_keyscan:
yield keyscan(state, host, hostname)
# Download the file with scp
yield 'scp {0}:{1} {2}'.format(connection_target, filename, local_filename) | [
"def",
"download",
"(",
"state",
",",
"host",
",",
"hostname",
",",
"filename",
",",
"local_filename",
"=",
"None",
",",
"force",
"=",
"False",
",",
"ssh_keyscan",
"=",
"False",
",",
"ssh_user",
"=",
"None",
",",
")",
":",
"local_filename",
"=",
"local_f... | Download files from other servers using ``scp``.
+ hostname: hostname to upload to
+ filename: file to download
+ local_filename: where to download the file to (defaults to ``filename``)
+ force: always download the file, even if present locally
+ ssh_keyscan: execute ``ssh.keyscan`` before uploading the file
+ ssh_user: connect with this user | [
"Download",
"files",
"from",
"other",
"servers",
"using",
"scp",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/ssh.py#L112-L154 | train | 22,830 |
Fizzadar/pyinfra | pyinfra/api/util.py | pop_op_kwargs | def pop_op_kwargs(state, kwargs):
'''
Pop and return operation global keyword arguments.
'''
meta_kwargs = state.deploy_kwargs or {}
def get_kwarg(key, default=None):
return kwargs.pop(key, meta_kwargs.get(key, default))
# Get the env for this host: config env followed by command-level env
env = state.config.ENV.copy()
env.update(get_kwarg('env', {}))
hosts = get_kwarg('hosts')
hosts = ensure_host_list(hosts, inventory=state.inventory)
# Filter out any hosts not in the meta kwargs (nested support)
if meta_kwargs.get('hosts') is not None:
hosts = [
host for host in hosts
if host in meta_kwargs['hosts']
]
return {
# ENVars for commands in this operation
'env': env,
# Hosts to limit the op to
'hosts': hosts,
# When to limit the op (default always)
'when': get_kwarg('when', True),
# Locally & globally configurable
'sudo': get_kwarg('sudo', state.config.SUDO),
'sudo_user': get_kwarg('sudo_user', state.config.SUDO_USER),
'su_user': get_kwarg('su_user', state.config.SU_USER),
# Whether to preserve ENVars when sudoing (eg SSH forward agent socket)
'preserve_sudo_env': get_kwarg(
'preserve_sudo_env', state.config.PRESERVE_SUDO_ENV,
),
# Ignore any errors during this operation
'ignore_errors': get_kwarg(
'ignore_errors', state.config.IGNORE_ERRORS,
),
# Timeout on running the command
'timeout': get_kwarg('timeout'),
# Get a PTY before executing commands
'get_pty': get_kwarg('get_pty', False),
# Forces serial mode for this operation (--serial for one op)
'serial': get_kwarg('serial', False),
# Only runs this operation once
'run_once': get_kwarg('run_once', False),
# Execute in batches of X hosts rather than all at once
'parallel': get_kwarg('parallel'),
# Callbacks
'on_success': get_kwarg('on_success'),
'on_error': get_kwarg('on_error'),
# Operation hash
'op': get_kwarg('op'),
} | python | def pop_op_kwargs(state, kwargs):
'''
Pop and return operation global keyword arguments.
'''
meta_kwargs = state.deploy_kwargs or {}
def get_kwarg(key, default=None):
return kwargs.pop(key, meta_kwargs.get(key, default))
# Get the env for this host: config env followed by command-level env
env = state.config.ENV.copy()
env.update(get_kwarg('env', {}))
hosts = get_kwarg('hosts')
hosts = ensure_host_list(hosts, inventory=state.inventory)
# Filter out any hosts not in the meta kwargs (nested support)
if meta_kwargs.get('hosts') is not None:
hosts = [
host for host in hosts
if host in meta_kwargs['hosts']
]
return {
# ENVars for commands in this operation
'env': env,
# Hosts to limit the op to
'hosts': hosts,
# When to limit the op (default always)
'when': get_kwarg('when', True),
# Locally & globally configurable
'sudo': get_kwarg('sudo', state.config.SUDO),
'sudo_user': get_kwarg('sudo_user', state.config.SUDO_USER),
'su_user': get_kwarg('su_user', state.config.SU_USER),
# Whether to preserve ENVars when sudoing (eg SSH forward agent socket)
'preserve_sudo_env': get_kwarg(
'preserve_sudo_env', state.config.PRESERVE_SUDO_ENV,
),
# Ignore any errors during this operation
'ignore_errors': get_kwarg(
'ignore_errors', state.config.IGNORE_ERRORS,
),
# Timeout on running the command
'timeout': get_kwarg('timeout'),
# Get a PTY before executing commands
'get_pty': get_kwarg('get_pty', False),
# Forces serial mode for this operation (--serial for one op)
'serial': get_kwarg('serial', False),
# Only runs this operation once
'run_once': get_kwarg('run_once', False),
# Execute in batches of X hosts rather than all at once
'parallel': get_kwarg('parallel'),
# Callbacks
'on_success': get_kwarg('on_success'),
'on_error': get_kwarg('on_error'),
# Operation hash
'op': get_kwarg('op'),
} | [
"def",
"pop_op_kwargs",
"(",
"state",
",",
"kwargs",
")",
":",
"meta_kwargs",
"=",
"state",
".",
"deploy_kwargs",
"or",
"{",
"}",
"def",
"get_kwarg",
"(",
"key",
",",
"default",
"=",
"None",
")",
":",
"return",
"kwargs",
".",
"pop",
"(",
"key",
",",
... | Pop and return operation global keyword arguments. | [
"Pop",
"and",
"return",
"operation",
"global",
"keyword",
"arguments",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L119-L177 | train | 22,831 |
Fizzadar/pyinfra | pyinfra/api/util.py | get_template | def get_template(filename_or_string, is_string=False):
'''
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
'''
# Cache against string sha or just the filename
cache_key = sha1_hash(filename_or_string) if is_string else filename_or_string
if cache_key in TEMPLATES:
return TEMPLATES[cache_key]
if is_string:
# Set the input string as our template
template_string = filename_or_string
else:
# Load template data into memory
with open(filename_or_string, 'r') as file_io:
template_string = file_io.read()
TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)
return TEMPLATES[cache_key] | python | def get_template(filename_or_string, is_string=False):
'''
Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string.
'''
# Cache against string sha or just the filename
cache_key = sha1_hash(filename_or_string) if is_string else filename_or_string
if cache_key in TEMPLATES:
return TEMPLATES[cache_key]
if is_string:
# Set the input string as our template
template_string = filename_or_string
else:
# Load template data into memory
with open(filename_or_string, 'r') as file_io:
template_string = file_io.read()
TEMPLATES[cache_key] = Template(template_string, keep_trailing_newline=True)
return TEMPLATES[cache_key] | [
"def",
"get_template",
"(",
"filename_or_string",
",",
"is_string",
"=",
"False",
")",
":",
"# Cache against string sha or just the filename",
"cache_key",
"=",
"sha1_hash",
"(",
"filename_or_string",
")",
"if",
"is_string",
"else",
"filename_or_string",
"if",
"cache_key"... | Gets a jinja2 ``Template`` object for the input filename or string, with caching
based on the filename of the template, or the SHA1 of the input string. | [
"Gets",
"a",
"jinja2",
"Template",
"object",
"for",
"the",
"input",
"filename",
"or",
"string",
"with",
"caching",
"based",
"on",
"the",
"filename",
"of",
"the",
"template",
"or",
"the",
"SHA1",
"of",
"the",
"input",
"string",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L202-L224 | train | 22,832 |
Fizzadar/pyinfra | pyinfra/api/util.py | underscore | def underscore(name):
'''
Transform CamelCase -> snake_case.
'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | python | def underscore(name):
'''
Transform CamelCase -> snake_case.
'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | [
"def",
"underscore",
"(",
"name",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | Transform CamelCase -> snake_case. | [
"Transform",
"CamelCase",
"-",
">",
"snake_case",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L227-L233 | train | 22,833 |
Fizzadar/pyinfra | pyinfra/api/util.py | sha1_hash | def sha1_hash(string):
'''
Return the SHA1 of the input string.
'''
hasher = sha1()
hasher.update(string.encode())
return hasher.hexdigest() | python | def sha1_hash(string):
'''
Return the SHA1 of the input string.
'''
hasher = sha1()
hasher.update(string.encode())
return hasher.hexdigest() | [
"def",
"sha1_hash",
"(",
"string",
")",
":",
"hasher",
"=",
"sha1",
"(",
")",
"hasher",
".",
"update",
"(",
"string",
".",
"encode",
"(",
")",
")",
"return",
"hasher",
".",
"hexdigest",
"(",
")"
] | Return the SHA1 of the input string. | [
"Return",
"the",
"SHA1",
"of",
"the",
"input",
"string",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L236-L243 | train | 22,834 |
Fizzadar/pyinfra | pyinfra/api/util.py | make_command | def make_command(
command,
env=None,
su_user=None,
sudo=False,
sudo_user=None,
preserve_sudo_env=False,
):
'''
Builds a shell command with various kwargs.
'''
debug_meta = {}
for key, value in (
('sudo', sudo),
('sudo_user', sudo_user),
('su_user', su_user),
('env', env),
):
if value:
debug_meta[key] = value
logger.debug('Building command ({0}): {1}'.format(' '.join(
'{0}: {1}'.format(key, value)
for key, value in six.iteritems(debug_meta)
), command))
# Use env & build our actual command
if env:
env_string = ' '.join([
'{0}={1}'.format(key, value)
for key, value in six.iteritems(env)
])
command = 'export {0}; {1}'.format(env_string, command)
# Quote the command as a string
command = shlex_quote(command)
# Switch user with su
if su_user:
command = 'su {0} -c {1}'.format(su_user, command)
# Otherwise just sh wrap the command
else:
command = 'sh -c {0}'.format(command)
# Use sudo (w/user?)
if sudo:
sudo_bits = ['sudo', '-H']
if preserve_sudo_env:
sudo_bits.append('-E')
if sudo_user:
sudo_bits.extend(('-u', sudo_user))
command = '{0} {1}'.format(' '.join(sudo_bits), command)
return command | python | def make_command(
command,
env=None,
su_user=None,
sudo=False,
sudo_user=None,
preserve_sudo_env=False,
):
'''
Builds a shell command with various kwargs.
'''
debug_meta = {}
for key, value in (
('sudo', sudo),
('sudo_user', sudo_user),
('su_user', su_user),
('env', env),
):
if value:
debug_meta[key] = value
logger.debug('Building command ({0}): {1}'.format(' '.join(
'{0}: {1}'.format(key, value)
for key, value in six.iteritems(debug_meta)
), command))
# Use env & build our actual command
if env:
env_string = ' '.join([
'{0}={1}'.format(key, value)
for key, value in six.iteritems(env)
])
command = 'export {0}; {1}'.format(env_string, command)
# Quote the command as a string
command = shlex_quote(command)
# Switch user with su
if su_user:
command = 'su {0} -c {1}'.format(su_user, command)
# Otherwise just sh wrap the command
else:
command = 'sh -c {0}'.format(command)
# Use sudo (w/user?)
if sudo:
sudo_bits = ['sudo', '-H']
if preserve_sudo_env:
sudo_bits.append('-E')
if sudo_user:
sudo_bits.extend(('-u', sudo_user))
command = '{0} {1}'.format(' '.join(sudo_bits), command)
return command | [
"def",
"make_command",
"(",
"command",
",",
"env",
"=",
"None",
",",
"su_user",
"=",
"None",
",",
"sudo",
"=",
"False",
",",
"sudo_user",
"=",
"None",
",",
"preserve_sudo_env",
"=",
"False",
",",
")",
":",
"debug_meta",
"=",
"{",
"}",
"for",
"key",
"... | Builds a shell command with various kwargs. | [
"Builds",
"a",
"shell",
"command",
"with",
"various",
"kwargs",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L280-L339 | train | 22,835 |
Fizzadar/pyinfra | pyinfra/api/util.py | make_hash | def make_hash(obj):
'''
Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate
ID's for operations based on their name & arguments.
'''
if isinstance(obj, (set, tuple, list)):
hash_string = ''.join([make_hash(e) for e in obj])
elif isinstance(obj, dict):
hash_string = ''.join(
''.join((key, make_hash(value)))
for key, value in six.iteritems(obj)
)
else:
hash_string = (
# Constants - the values can change between hosts but we should still
# group them under the same operation hash.
'_PYINFRA_CONSTANT' if obj in (True, False, None)
# Plain strings
else obj if isinstance(obj, six.string_types)
# Objects with __name__s
else obj.__name__ if hasattr(obj, '__name__')
# Objects with names
else obj.name if hasattr(obj, 'name')
# Repr anything else
else repr(obj)
)
return sha1_hash(hash_string) | python | def make_hash(obj):
'''
Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate
ID's for operations based on their name & arguments.
'''
if isinstance(obj, (set, tuple, list)):
hash_string = ''.join([make_hash(e) for e in obj])
elif isinstance(obj, dict):
hash_string = ''.join(
''.join((key, make_hash(value)))
for key, value in six.iteritems(obj)
)
else:
hash_string = (
# Constants - the values can change between hosts but we should still
# group them under the same operation hash.
'_PYINFRA_CONSTANT' if obj in (True, False, None)
# Plain strings
else obj if isinstance(obj, six.string_types)
# Objects with __name__s
else obj.__name__ if hasattr(obj, '__name__')
# Objects with names
else obj.name if hasattr(obj, 'name')
# Repr anything else
else repr(obj)
)
return sha1_hash(hash_string) | [
"def",
"make_hash",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"(",
"set",
",",
"tuple",
",",
"list",
")",
")",
":",
"hash_string",
"=",
"''",
".",
"join",
"(",
"[",
"make_hash",
"(",
"e",
")",
"for",
"e",
"in",
"obj",
"]",
")",... | Make a hash from an arbitrary nested dictionary, list, tuple or set, used to generate
ID's for operations based on their name & arguments. | [
"Make",
"a",
"hash",
"from",
"an",
"arbitrary",
"nested",
"dictionary",
"list",
"tuple",
"or",
"set",
"used",
"to",
"generate",
"ID",
"s",
"for",
"operations",
"based",
"on",
"their",
"name",
"&",
"arguments",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L376-L406 | train | 22,836 |
Fizzadar/pyinfra | pyinfra/api/util.py | get_file_sha1 | def get_file_sha1(filename_or_io):
'''
Calculates the SHA1 of a file or file object using a buffer to handle larger files.
'''
file_data = get_file_io(filename_or_io)
cache_key = file_data.cache_key
if cache_key and cache_key in FILE_SHAS:
return FILE_SHAS[cache_key]
with file_data as file_io:
hasher = sha1()
buff = file_io.read(BLOCKSIZE)
while len(buff) > 0:
if isinstance(buff, six.text_type):
buff = buff.encode('utf-8')
hasher.update(buff)
buff = file_io.read(BLOCKSIZE)
digest = hasher.hexdigest()
if cache_key:
FILE_SHAS[cache_key] = digest
return digest | python | def get_file_sha1(filename_or_io):
'''
Calculates the SHA1 of a file or file object using a buffer to handle larger files.
'''
file_data = get_file_io(filename_or_io)
cache_key = file_data.cache_key
if cache_key and cache_key in FILE_SHAS:
return FILE_SHAS[cache_key]
with file_data as file_io:
hasher = sha1()
buff = file_io.read(BLOCKSIZE)
while len(buff) > 0:
if isinstance(buff, six.text_type):
buff = buff.encode('utf-8')
hasher.update(buff)
buff = file_io.read(BLOCKSIZE)
digest = hasher.hexdigest()
if cache_key:
FILE_SHAS[cache_key] = digest
return digest | [
"def",
"get_file_sha1",
"(",
"filename_or_io",
")",
":",
"file_data",
"=",
"get_file_io",
"(",
"filename_or_io",
")",
"cache_key",
"=",
"file_data",
".",
"cache_key",
"if",
"cache_key",
"and",
"cache_key",
"in",
"FILE_SHAS",
":",
"return",
"FILE_SHAS",
"[",
"cac... | Calculates the SHA1 of a file or file object using a buffer to handle larger files. | [
"Calculates",
"the",
"SHA1",
"of",
"a",
"file",
"or",
"file",
"object",
"using",
"a",
"buffer",
"to",
"handle",
"larger",
"files",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L459-L486 | train | 22,837 |
Fizzadar/pyinfra | pyinfra/api/util.py | read_buffer | def read_buffer(io, print_output=False, print_func=None):
'''
Reads a file-like buffer object into lines and optionally prints the output.
'''
# TODO: research this further - some steps towards handling stdin (ie password requests
# from programs that don't notice there's no TTY to accept passwords from!). This just
# prints output as below, but stores partial lines in a buffer, which could be printed
# when ready to accept input. Or detected and raise an error.
# GitHub issue: https://github.com/Fizzadar/pyinfra/issues/40
# buff = ''
# data = io.read(1)
# while data:
# # Append to the buffer
# buff += data
# # Newlines in the buffer? Break them out
# if '\n' in buff:
# lines = buff.split('\n')
# # Set the buffer back to just the last line
# buff = lines[-1]
# # Get the other lines, strip them
# lines = [
# line.strip()
# for line in lines[:-1]
# ]
# out.extend(lines)
# for line in lines:
# _print(line)
# # Get next data
# data = io.read(1)
# if buff:
# line = buff.strip()
# out.append(line)
# _print(line)
def _print(line):
if print_output:
if print_func:
formatted_line = print_func(line)
else:
formatted_line = line
encoded_line = unicode(formatted_line).encode('utf-8')
print(encoded_line)
out = []
for line in io:
# Handle local Popen shells returning list of bytes, not strings
if not isinstance(line, six.text_type):
line = line.decode('utf-8')
line = line.strip()
out.append(line)
_print(line)
return out | python | def read_buffer(io, print_output=False, print_func=None):
'''
Reads a file-like buffer object into lines and optionally prints the output.
'''
# TODO: research this further - some steps towards handling stdin (ie password requests
# from programs that don't notice there's no TTY to accept passwords from!). This just
# prints output as below, but stores partial lines in a buffer, which could be printed
# when ready to accept input. Or detected and raise an error.
# GitHub issue: https://github.com/Fizzadar/pyinfra/issues/40
# buff = ''
# data = io.read(1)
# while data:
# # Append to the buffer
# buff += data
# # Newlines in the buffer? Break them out
# if '\n' in buff:
# lines = buff.split('\n')
# # Set the buffer back to just the last line
# buff = lines[-1]
# # Get the other lines, strip them
# lines = [
# line.strip()
# for line in lines[:-1]
# ]
# out.extend(lines)
# for line in lines:
# _print(line)
# # Get next data
# data = io.read(1)
# if buff:
# line = buff.strip()
# out.append(line)
# _print(line)
def _print(line):
if print_output:
if print_func:
formatted_line = print_func(line)
else:
formatted_line = line
encoded_line = unicode(formatted_line).encode('utf-8')
print(encoded_line)
out = []
for line in io:
# Handle local Popen shells returning list of bytes, not strings
if not isinstance(line, six.text_type):
line = line.decode('utf-8')
line = line.strip()
out.append(line)
_print(line)
return out | [
"def",
"read_buffer",
"(",
"io",
",",
"print_output",
"=",
"False",
",",
"print_func",
"=",
"None",
")",
":",
"# TODO: research this further - some steps towards handling stdin (ie password requests",
"# from programs that don't notice there's no TTY to accept passwords from!). This ju... | Reads a file-like buffer object into lines and optionally prints the output. | [
"Reads",
"a",
"file",
"-",
"like",
"buffer",
"object",
"into",
"lines",
"and",
"optionally",
"prints",
"the",
"output",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/util.py#L489-L555 | train | 22,838 |
Fizzadar/pyinfra | pyinfra/modules/vzctl.py | start | def start(state, host, ctid, force=False):
'''
Start OpenVZ containers.
+ ctid: CTID of the container to start
+ force: whether to force container start
'''
args = ['{0}'.format(ctid)]
if force:
args.append('--force')
yield 'vzctl start {0}'.format(' '.join(args)) | python | def start(state, host, ctid, force=False):
'''
Start OpenVZ containers.
+ ctid: CTID of the container to start
+ force: whether to force container start
'''
args = ['{0}'.format(ctid)]
if force:
args.append('--force')
yield 'vzctl start {0}'.format(' '.join(args)) | [
"def",
"start",
"(",
"state",
",",
"host",
",",
"ctid",
",",
"force",
"=",
"False",
")",
":",
"args",
"=",
"[",
"'{0}'",
".",
"format",
"(",
"ctid",
")",
"]",
"if",
"force",
":",
"args",
".",
"append",
"(",
"'--force'",
")",
"yield",
"'vzctl start ... | Start OpenVZ containers.
+ ctid: CTID of the container to start
+ force: whether to force container start | [
"Start",
"OpenVZ",
"containers",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/vzctl.py#L15-L28 | train | 22,839 |
Fizzadar/pyinfra | pyinfra/modules/vzctl.py | stop | def stop(state, host, ctid):
'''
Stop OpenVZ containers.
+ ctid: CTID of the container to stop
'''
args = ['{0}'.format(ctid)]
yield 'vzctl stop {0}'.format(' '.join(args)) | python | def stop(state, host, ctid):
'''
Stop OpenVZ containers.
+ ctid: CTID of the container to stop
'''
args = ['{0}'.format(ctid)]
yield 'vzctl stop {0}'.format(' '.join(args)) | [
"def",
"stop",
"(",
"state",
",",
"host",
",",
"ctid",
")",
":",
"args",
"=",
"[",
"'{0}'",
".",
"format",
"(",
"ctid",
")",
"]",
"yield",
"'vzctl stop {0}'",
".",
"format",
"(",
"' '",
".",
"join",
"(",
"args",
")",
")"
] | Stop OpenVZ containers.
+ ctid: CTID of the container to stop | [
"Stop",
"OpenVZ",
"containers",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/vzctl.py#L32-L41 | train | 22,840 |
Fizzadar/pyinfra | pyinfra/modules/vzctl.py | restart | def restart(state, host, ctid, force=False):
'''
Restart OpenVZ containers.
+ ctid: CTID of the container to restart
+ force: whether to force container start
'''
yield stop(state, host, ctid)
yield start(state, host, ctid, force=force) | python | def restart(state, host, ctid, force=False):
'''
Restart OpenVZ containers.
+ ctid: CTID of the container to restart
+ force: whether to force container start
'''
yield stop(state, host, ctid)
yield start(state, host, ctid, force=force) | [
"def",
"restart",
"(",
"state",
",",
"host",
",",
"ctid",
",",
"force",
"=",
"False",
")",
":",
"yield",
"stop",
"(",
"state",
",",
"host",
",",
"ctid",
")",
"yield",
"start",
"(",
"state",
",",
"host",
",",
"ctid",
",",
"force",
"=",
"force",
")... | Restart OpenVZ containers.
+ ctid: CTID of the container to restart
+ force: whether to force container start | [
"Restart",
"OpenVZ",
"containers",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/vzctl.py#L45-L54 | train | 22,841 |
Fizzadar/pyinfra | pyinfra/modules/vzctl.py | create | def create(state, host, ctid, template=None):
'''
Create OpenVZ containers.
+ ctid: CTID of the container to create
'''
# Check we don't already have a container with this CTID
current_containers = host.fact.openvz_containers
if ctid in current_containers:
raise OperationError(
'An OpenVZ container with CTID {0} already exists'.format(ctid),
)
args = ['{0}'.format(ctid)]
if template:
args.append('--ostemplate {0}'.format(template))
yield 'vzctl create {0}'.format(' '.join(args)) | python | def create(state, host, ctid, template=None):
'''
Create OpenVZ containers.
+ ctid: CTID of the container to create
'''
# Check we don't already have a container with this CTID
current_containers = host.fact.openvz_containers
if ctid in current_containers:
raise OperationError(
'An OpenVZ container with CTID {0} already exists'.format(ctid),
)
args = ['{0}'.format(ctid)]
if template:
args.append('--ostemplate {0}'.format(template))
yield 'vzctl create {0}'.format(' '.join(args)) | [
"def",
"create",
"(",
"state",
",",
"host",
",",
"ctid",
",",
"template",
"=",
"None",
")",
":",
"# Check we don't already have a container with this CTID",
"current_containers",
"=",
"host",
".",
"fact",
".",
"openvz_containers",
"if",
"ctid",
"in",
"current_contai... | Create OpenVZ containers.
+ ctid: CTID of the container to create | [
"Create",
"OpenVZ",
"containers",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/vzctl.py#L91-L110 | train | 22,842 |
Fizzadar/pyinfra | pyinfra/modules/vzctl.py | set | def set(state, host, ctid, save=True, **settings):
'''
Set OpenVZ container details.
+ ctid: CTID of the container to set
+ save: whether to save the changes
+ settings: settings/arguments to apply to the container
Settings/arguments:
these are mapped directly to ``vztctl`` arguments, eg
``hostname='my-host.net'`` becomes ``--hostname my-host.net``.
'''
args = ['{0}'.format(ctid)]
if save:
args.append('--save')
for key, value in six.iteritems(settings):
# Handle list values (eg --nameserver X --nameserver X)
if isinstance(value, list):
args.extend('--{0} {1}'.format(key, v) for v in value)
else:
args.append('--{0} {1}'.format(key, value))
yield 'vzctl set {0}'.format(' '.join(args)) | python | def set(state, host, ctid, save=True, **settings):
'''
Set OpenVZ container details.
+ ctid: CTID of the container to set
+ save: whether to save the changes
+ settings: settings/arguments to apply to the container
Settings/arguments:
these are mapped directly to ``vztctl`` arguments, eg
``hostname='my-host.net'`` becomes ``--hostname my-host.net``.
'''
args = ['{0}'.format(ctid)]
if save:
args.append('--save')
for key, value in six.iteritems(settings):
# Handle list values (eg --nameserver X --nameserver X)
if isinstance(value, list):
args.extend('--{0} {1}'.format(key, v) for v in value)
else:
args.append('--{0} {1}'.format(key, value))
yield 'vzctl set {0}'.format(' '.join(args)) | [
"def",
"set",
"(",
"state",
",",
"host",
",",
"ctid",
",",
"save",
"=",
"True",
",",
"*",
"*",
"settings",
")",
":",
"args",
"=",
"[",
"'{0}'",
".",
"format",
"(",
"ctid",
")",
"]",
"if",
"save",
":",
"args",
".",
"append",
"(",
"'--save'",
")"... | Set OpenVZ container details.
+ ctid: CTID of the container to set
+ save: whether to save the changes
+ settings: settings/arguments to apply to the container
Settings/arguments:
these are mapped directly to ``vztctl`` arguments, eg
``hostname='my-host.net'`` becomes ``--hostname my-host.net``. | [
"Set",
"OpenVZ",
"container",
"details",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/vzctl.py#L114-L139 | train | 22,843 |
Fizzadar/pyinfra | pyinfra_cli/util.py | exec_file | def exec_file(filename, return_locals=False, is_deploy_code=False):
'''
Execute a Python file and optionally return it's attributes as a dict.
'''
if filename not in PYTHON_CODES:
with open(filename, 'r') as f:
code = f.read()
code = compile(code, filename, 'exec')
PYTHON_CODES[filename] = code
# Create some base attributes for our "module"
data = {
'__file__': filename,
'state': pseudo_state,
}
# Execute the code with locals/globals going into the dict above
exec(PYTHON_CODES[filename], data)
return data | python | def exec_file(filename, return_locals=False, is_deploy_code=False):
'''
Execute a Python file and optionally return it's attributes as a dict.
'''
if filename not in PYTHON_CODES:
with open(filename, 'r') as f:
code = f.read()
code = compile(code, filename, 'exec')
PYTHON_CODES[filename] = code
# Create some base attributes for our "module"
data = {
'__file__': filename,
'state': pseudo_state,
}
# Execute the code with locals/globals going into the dict above
exec(PYTHON_CODES[filename], data)
return data | [
"def",
"exec_file",
"(",
"filename",
",",
"return_locals",
"=",
"False",
",",
"is_deploy_code",
"=",
"False",
")",
":",
"if",
"filename",
"not",
"in",
"PYTHON_CODES",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"f",
":",
"code",
"=",
"... | Execute a Python file and optionally return it's attributes as a dict. | [
"Execute",
"a",
"Python",
"file",
"and",
"optionally",
"return",
"it",
"s",
"attributes",
"as",
"a",
"dict",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra_cli/util.py#L37-L58 | train | 22,844 |
Fizzadar/pyinfra | pyinfra/modules/server.py | shell | def shell(state, host, commands, chdir=None):
'''
Run raw shell code.
+ commands: command or list of commands to execute on the remote server
+ chdir: directory to cd into before executing commands
'''
# Ensure we have a list
if isinstance(commands, six.string_types):
commands = [commands]
for command in commands:
if chdir:
yield 'cd {0} && ({1})'.format(chdir, command)
else:
yield command | python | def shell(state, host, commands, chdir=None):
'''
Run raw shell code.
+ commands: command or list of commands to execute on the remote server
+ chdir: directory to cd into before executing commands
'''
# Ensure we have a list
if isinstance(commands, six.string_types):
commands = [commands]
for command in commands:
if chdir:
yield 'cd {0} && ({1})'.format(chdir, command)
else:
yield command | [
"def",
"shell",
"(",
"state",
",",
"host",
",",
"commands",
",",
"chdir",
"=",
"None",
")",
":",
"# Ensure we have a list",
"if",
"isinstance",
"(",
"commands",
",",
"six",
".",
"string_types",
")",
":",
"commands",
"=",
"[",
"commands",
"]",
"for",
"com... | Run raw shell code.
+ commands: command or list of commands to execute on the remote server
+ chdir: directory to cd into before executing commands | [
"Run",
"raw",
"shell",
"code",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L39-L55 | train | 22,845 |
Fizzadar/pyinfra | pyinfra/modules/server.py | script | def script(state, host, filename, chdir=None):
'''
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(filename)
yield files.put(state, host, filename, temp_file)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file | python | def script(state, host, filename, chdir=None):
'''
Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(filename)
yield files.put(state, host, filename, temp_file)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file | [
"def",
"script",
"(",
"state",
",",
"host",
",",
"filename",
",",
"chdir",
"=",
"None",
")",
":",
"temp_file",
"=",
"state",
".",
"get_temp_filename",
"(",
"filename",
")",
"yield",
"files",
".",
"put",
"(",
"state",
",",
"host",
",",
"filename",
",",
... | Upload and execute a local script on the remote host.
+ filename: local script filename to upload & execute
+ chdir: directory to cd into before executing the script | [
"Upload",
"and",
"execute",
"a",
"local",
"script",
"on",
"the",
"remote",
"host",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L59-L75 | train | 22,846 |
Fizzadar/pyinfra | pyinfra/modules/server.py | script_template | def script_template(state, host, template_filename, chdir=None, **data):
'''
Generate, upload and execute a local script template on the remote host.
+ template_filename: local script template filename
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(template_filename)
yield files.template(state, host, template_filename, temp_file, **data)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file | python | def script_template(state, host, template_filename, chdir=None, **data):
'''
Generate, upload and execute a local script template on the remote host.
+ template_filename: local script template filename
+ chdir: directory to cd into before executing the script
'''
temp_file = state.get_temp_filename(template_filename)
yield files.template(state, host, template_filename, temp_file, **data)
yield chmod(temp_file, '+x')
if chdir:
yield 'cd {0} && {1}'.format(chdir, temp_file)
else:
yield temp_file | [
"def",
"script_template",
"(",
"state",
",",
"host",
",",
"template_filename",
",",
"chdir",
"=",
"None",
",",
"*",
"*",
"data",
")",
":",
"temp_file",
"=",
"state",
".",
"get_temp_filename",
"(",
"template_filename",
")",
"yield",
"files",
".",
"template",
... | Generate, upload and execute a local script template on the remote host.
+ template_filename: local script template filename
+ chdir: directory to cd into before executing the script | [
"Generate",
"upload",
"and",
"execute",
"a",
"local",
"script",
"template",
"on",
"the",
"remote",
"host",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L79-L95 | train | 22,847 |
Fizzadar/pyinfra | pyinfra/modules/server.py | hostname | def hostname(state, host, hostname, hostname_file=None):
'''
Set the system hostname.
+ hostname: the hostname that should be set
+ hostname_file: the file that permanently sets the hostname
Hostname file:
By default pyinfra will auto detect this by targetting ``/etc/hostname``
on Linux and ``/etc/myname`` on OpenBSD.
'''
if hostname_file is None:
os = host.fact.os
if os == 'Linux':
hostname_file = '/etc/hostname'
elif os == 'OpenBSD':
hostname_file = '/etc/myname'
current_hostname = host.fact.hostname
if current_hostname != hostname:
yield 'hostname {0}'.format(hostname)
if hostname_file:
# Create a whole new hostname file
file = six.StringIO('{0}\n'.format(hostname))
# And ensure it exists
yield files.put(
state, host,
file, hostname_file,
) | python | def hostname(state, host, hostname, hostname_file=None):
'''
Set the system hostname.
+ hostname: the hostname that should be set
+ hostname_file: the file that permanently sets the hostname
Hostname file:
By default pyinfra will auto detect this by targetting ``/etc/hostname``
on Linux and ``/etc/myname`` on OpenBSD.
'''
if hostname_file is None:
os = host.fact.os
if os == 'Linux':
hostname_file = '/etc/hostname'
elif os == 'OpenBSD':
hostname_file = '/etc/myname'
current_hostname = host.fact.hostname
if current_hostname != hostname:
yield 'hostname {0}'.format(hostname)
if hostname_file:
# Create a whole new hostname file
file = six.StringIO('{0}\n'.format(hostname))
# And ensure it exists
yield files.put(
state, host,
file, hostname_file,
) | [
"def",
"hostname",
"(",
"state",
",",
"host",
",",
"hostname",
",",
"hostname_file",
"=",
"None",
")",
":",
"if",
"hostname_file",
"is",
"None",
":",
"os",
"=",
"host",
".",
"fact",
".",
"os",
"if",
"os",
"==",
"'Linux'",
":",
"hostname_file",
"=",
"... | Set the system hostname.
+ hostname: the hostname that should be set
+ hostname_file: the file that permanently sets the hostname
Hostname file:
By default pyinfra will auto detect this by targetting ``/etc/hostname``
on Linux and ``/etc/myname`` on OpenBSD. | [
"Set",
"the",
"system",
"hostname",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L125-L158 | train | 22,848 |
Fizzadar/pyinfra | pyinfra/modules/server.py | sysctl | def sysctl(
state, host, name, value,
persist=False, persist_file='/etc/sysctl.conf',
):
'''
Edit sysctl configuration.
+ name: name of the sysctl setting to ensure
+ value: the value or list of values the sysctl should be
+ persist: whether to write this sysctl to the config
+ persist_file: file to write the sysctl to persist on reboot
'''
string_value = (
' '.join(value)
if isinstance(value, list)
else value
)
existing_value = host.fact.sysctl.get(name)
if not existing_value or existing_value != value:
yield 'sysctl {0}={1}'.format(name, string_value)
if persist:
yield files.line(
state, host,
persist_file,
'{0}[[:space:]]*=[[:space:]]*{1}'.format(name, string_value),
replace='{0} = {1}'.format(name, string_value),
) | python | def sysctl(
state, host, name, value,
persist=False, persist_file='/etc/sysctl.conf',
):
'''
Edit sysctl configuration.
+ name: name of the sysctl setting to ensure
+ value: the value or list of values the sysctl should be
+ persist: whether to write this sysctl to the config
+ persist_file: file to write the sysctl to persist on reboot
'''
string_value = (
' '.join(value)
if isinstance(value, list)
else value
)
existing_value = host.fact.sysctl.get(name)
if not existing_value or existing_value != value:
yield 'sysctl {0}={1}'.format(name, string_value)
if persist:
yield files.line(
state, host,
persist_file,
'{0}[[:space:]]*=[[:space:]]*{1}'.format(name, string_value),
replace='{0} = {1}'.format(name, string_value),
) | [
"def",
"sysctl",
"(",
"state",
",",
"host",
",",
"name",
",",
"value",
",",
"persist",
"=",
"False",
",",
"persist_file",
"=",
"'/etc/sysctl.conf'",
",",
")",
":",
"string_value",
"=",
"(",
"' '",
".",
"join",
"(",
"value",
")",
"if",
"isinstance",
"("... | Edit sysctl configuration.
+ name: name of the sysctl setting to ensure
+ value: the value or list of values the sysctl should be
+ persist: whether to write this sysctl to the config
+ persist_file: file to write the sysctl to persist on reboot | [
"Edit",
"sysctl",
"configuration",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/server.py#L162-L191 | train | 22,849 |
Fizzadar/pyinfra | pyinfra/modules/files.py | download | def download(
state, host, source_url, destination,
user=None, group=None, mode=None, cache_time=None, force=False,
):
'''
Download files from remote locations.
+ source_url: source URl of the file
+ destination: where to save the file
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
+ cache_time: if the file exists already, re-download after this time (in s)
+ force: always download the file, even if it already exists
'''
# Get destination info
info = host.fact.file(destination)
# Destination is a directory?
if info is False:
raise OperationError(
'Destination {0} already exists and is not a file'.format(destination),
)
# Do we download the file? Force by default
download = force
# Doesn't exist, lets download it
if info is None:
download = True
# Destination file exists & cache_time: check when the file was last modified,
# download if old
elif cache_time:
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from host.fact.date before comparison.
cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
if info['mtime'] and info['mtime'] > cache_time:
download = True
# If we download, always do user/group/mode as SSH user may be different
if download:
yield 'wget -q {0} -O {1}'.format(source_url, destination)
if user or group:
yield chown(destination, user, group)
if mode:
yield chmod(destination, mode) | python | def download(
state, host, source_url, destination,
user=None, group=None, mode=None, cache_time=None, force=False,
):
'''
Download files from remote locations.
+ source_url: source URl of the file
+ destination: where to save the file
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
+ cache_time: if the file exists already, re-download after this time (in s)
+ force: always download the file, even if it already exists
'''
# Get destination info
info = host.fact.file(destination)
# Destination is a directory?
if info is False:
raise OperationError(
'Destination {0} already exists and is not a file'.format(destination),
)
# Do we download the file? Force by default
download = force
# Doesn't exist, lets download it
if info is None:
download = True
# Destination file exists & cache_time: check when the file was last modified,
# download if old
elif cache_time:
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from host.fact.date before comparison.
cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
if info['mtime'] and info['mtime'] > cache_time:
download = True
# If we download, always do user/group/mode as SSH user may be different
if download:
yield 'wget -q {0} -O {1}'.format(source_url, destination)
if user or group:
yield chown(destination, user, group)
if mode:
yield chmod(destination, mode) | [
"def",
"download",
"(",
"state",
",",
"host",
",",
"source_url",
",",
"destination",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"cache_time",
"=",
"None",
",",
"force",
"=",
"False",
",",
")",
":",
"# Get dest... | Download files from remote locations.
+ source_url: source URl of the file
+ destination: where to save the file
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
+ cache_time: if the file exists already, re-download after this time (in s)
+ force: always download the file, even if it already exists | [
"Download",
"files",
"from",
"remote",
"locations",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L30-L79 | train | 22,850 |
Fizzadar/pyinfra | pyinfra/modules/files.py | replace | def replace(state, host, name, match, replace, flags=None):
'''
A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
+ flags: list of flaggs to pass to sed
'''
yield sed_replace(name, match, replace, flags=flags) | python | def replace(state, host, name, match, replace, flags=None):
'''
A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
+ flags: list of flaggs to pass to sed
'''
yield sed_replace(name, match, replace, flags=flags) | [
"def",
"replace",
"(",
"state",
",",
"host",
",",
"name",
",",
"match",
",",
"replace",
",",
"flags",
"=",
"None",
")",
":",
"yield",
"sed_replace",
"(",
"name",
",",
"match",
",",
"replace",
",",
"flags",
"=",
"flags",
")"
] | A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
+ flags: list of flaggs to pass to sed | [
"A",
"simple",
"shortcut",
"for",
"replacing",
"text",
"in",
"files",
"with",
"sed",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L169-L179 | train | 22,851 |
Fizzadar/pyinfra | pyinfra/modules/files.py | sync | def sync(
state, host, source, destination,
user=None, group=None, mode=None, delete=False,
exclude=None, exclude_dir=None, add_deploy_dir=True,
):
'''
Syncs a local directory with a remote one, with delete support. Note that delete will
remove extra files on the remote side, but not extra directories.
+ source: local directory to sync
+ destination: remote directory to sync to
+ user: user to own the files and directories
+ group: group to own the files and directories
+ mode: permissions of the files
+ delete: delete remote files not present locally
+ exclude: string or list/tuple of strings to match & exclude files (eg *.pyc)
+ exclude_dir: string or list/tuple of strings to match & exclude directories (eg node_modules)
'''
# If we don't enforce the source ending with /, remote_dirname below might start with
# a /, which makes the path.join cut off the destination bit.
if not source.endswith(path.sep):
source = '{0}{1}'.format(source, path.sep)
# Add deploy directory?
if add_deploy_dir and state.deploy_dir:
source = path.join(state.deploy_dir, source)
# Ensure the source directory exists
if not path.isdir(source):
raise IOError('No such directory: {0}'.format(source))
# Ensure exclude is a list/tuple
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
exclude = [exclude]
# Ensure exclude_dir is a list/tuple
if exclude_dir is not None:
if not isinstance(exclude_dir, (list, tuple)):
exclude_dir = [exclude_dir]
put_files = []
ensure_dirnames = []
for dirname, _, filenames in walk(source):
remote_dirname = dirname.replace(source, '')
# Should we exclude this dir?
if exclude_dir and any(fnmatch(remote_dirname, match) for match in exclude_dir):
continue
if remote_dirname:
ensure_dirnames.append(remote_dirname)
for filename in filenames:
full_filename = path.join(dirname, filename)
# Should we exclude this file?
if exclude and any(fnmatch(full_filename, match) for match in exclude):
continue
put_files.append((
# Join local as normal (unix, win)
full_filename,
# Join remote as unix like
'/'.join(
item for item in
(destination, remote_dirname, filename)
if item
),
))
# Ensure the destination directory
yield directory(
state, host, destination,
user=user, group=group,
)
# Ensure any remote dirnames
for dirname in ensure_dirnames:
yield directory(
state, host,
'/'.join((destination, dirname)),
user=user, group=group,
)
# Put each file combination
for local_filename, remote_filename in put_files:
yield put(
state, host,
local_filename, remote_filename,
user=user, group=group, mode=mode,
add_deploy_dir=False,
)
# Delete any extra files
if delete:
remote_filenames = set(host.fact.find_files(destination) or [])
wanted_filenames = set([remote_filename for _, remote_filename in put_files])
files_to_delete = remote_filenames - wanted_filenames
for filename in files_to_delete:
# Should we exclude this file?
if exclude and any(fnmatch(filename, match) for match in exclude):
continue
yield file(state, host, filename, present=False) | python | def sync(
state, host, source, destination,
user=None, group=None, mode=None, delete=False,
exclude=None, exclude_dir=None, add_deploy_dir=True,
):
'''
Syncs a local directory with a remote one, with delete support. Note that delete will
remove extra files on the remote side, but not extra directories.
+ source: local directory to sync
+ destination: remote directory to sync to
+ user: user to own the files and directories
+ group: group to own the files and directories
+ mode: permissions of the files
+ delete: delete remote files not present locally
+ exclude: string or list/tuple of strings to match & exclude files (eg *.pyc)
+ exclude_dir: string or list/tuple of strings to match & exclude directories (eg node_modules)
'''
# If we don't enforce the source ending with /, remote_dirname below might start with
# a /, which makes the path.join cut off the destination bit.
if not source.endswith(path.sep):
source = '{0}{1}'.format(source, path.sep)
# Add deploy directory?
if add_deploy_dir and state.deploy_dir:
source = path.join(state.deploy_dir, source)
# Ensure the source directory exists
if not path.isdir(source):
raise IOError('No such directory: {0}'.format(source))
# Ensure exclude is a list/tuple
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
exclude = [exclude]
# Ensure exclude_dir is a list/tuple
if exclude_dir is not None:
if not isinstance(exclude_dir, (list, tuple)):
exclude_dir = [exclude_dir]
put_files = []
ensure_dirnames = []
for dirname, _, filenames in walk(source):
remote_dirname = dirname.replace(source, '')
# Should we exclude this dir?
if exclude_dir and any(fnmatch(remote_dirname, match) for match in exclude_dir):
continue
if remote_dirname:
ensure_dirnames.append(remote_dirname)
for filename in filenames:
full_filename = path.join(dirname, filename)
# Should we exclude this file?
if exclude and any(fnmatch(full_filename, match) for match in exclude):
continue
put_files.append((
# Join local as normal (unix, win)
full_filename,
# Join remote as unix like
'/'.join(
item for item in
(destination, remote_dirname, filename)
if item
),
))
# Ensure the destination directory
yield directory(
state, host, destination,
user=user, group=group,
)
# Ensure any remote dirnames
for dirname in ensure_dirnames:
yield directory(
state, host,
'/'.join((destination, dirname)),
user=user, group=group,
)
# Put each file combination
for local_filename, remote_filename in put_files:
yield put(
state, host,
local_filename, remote_filename,
user=user, group=group, mode=mode,
add_deploy_dir=False,
)
# Delete any extra files
if delete:
remote_filenames = set(host.fact.find_files(destination) or [])
wanted_filenames = set([remote_filename for _, remote_filename in put_files])
files_to_delete = remote_filenames - wanted_filenames
for filename in files_to_delete:
# Should we exclude this file?
if exclude and any(fnmatch(filename, match) for match in exclude):
continue
yield file(state, host, filename, present=False) | [
"def",
"sync",
"(",
"state",
",",
"host",
",",
"source",
",",
"destination",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"delete",
"=",
"False",
",",
"exclude",
"=",
"None",
",",
"exclude_dir",
"=",
"None",
"... | Syncs a local directory with a remote one, with delete support. Note that delete will
remove extra files on the remote side, but not extra directories.
+ source: local directory to sync
+ destination: remote directory to sync to
+ user: user to own the files and directories
+ group: group to own the files and directories
+ mode: permissions of the files
+ delete: delete remote files not present locally
+ exclude: string or list/tuple of strings to match & exclude files (eg *.pyc)
+ exclude_dir: string or list/tuple of strings to match & exclude directories (eg node_modules) | [
"Syncs",
"a",
"local",
"directory",
"with",
"a",
"remote",
"one",
"with",
"delete",
"support",
".",
"Note",
"that",
"delete",
"will",
"remove",
"extra",
"files",
"on",
"the",
"remote",
"side",
"but",
"not",
"extra",
"directories",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L185-L290 | train | 22,852 |
Fizzadar/pyinfra | pyinfra/modules/files.py | put | def put(
state, host, local_filename, remote_filename,
user=None, group=None, mode=None, add_deploy_dir=True,
):
'''
Copy a local file to the remote system.
+ local_filename: local filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
'''
# Upload IO objects as-is
if hasattr(local_filename, 'read'):
local_file = local_filename
# Assume string filename
else:
# Add deploy directory?
if add_deploy_dir and state.deploy_dir:
local_filename = path.join(state.deploy_dir, local_filename)
local_file = local_filename
if not path.isfile(local_file):
raise IOError('No such file: {0}'.format(local_file))
mode = ensure_mode_int(mode)
remote_file = host.fact.file(remote_filename)
# No remote file, always upload and user/group/mode if supplied
if not remote_file:
yield (local_file, remote_filename)
if user or group:
yield chown(remote_filename, user, group)
if mode:
yield chmod(remote_filename, mode)
# File exists, check sum and check user/group/mode if supplied
else:
local_sum = get_file_sha1(local_filename)
remote_sum = host.fact.sha1_file(remote_filename)
# Check sha1sum, upload if needed
if local_sum != remote_sum:
yield (local_file, remote_filename)
if user or group:
yield chown(remote_filename, user, group)
if mode:
yield chmod(remote_filename, mode)
else:
# Check mode
if mode and remote_file['mode'] != mode:
yield chmod(remote_filename, mode)
# Check user/group
if (
(user and remote_file['user'] != user)
or (group and remote_file['group'] != group)
):
yield chown(remote_filename, user, group) | python | def put(
state, host, local_filename, remote_filename,
user=None, group=None, mode=None, add_deploy_dir=True,
):
'''
Copy a local file to the remote system.
+ local_filename: local filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
'''
# Upload IO objects as-is
if hasattr(local_filename, 'read'):
local_file = local_filename
# Assume string filename
else:
# Add deploy directory?
if add_deploy_dir and state.deploy_dir:
local_filename = path.join(state.deploy_dir, local_filename)
local_file = local_filename
if not path.isfile(local_file):
raise IOError('No such file: {0}'.format(local_file))
mode = ensure_mode_int(mode)
remote_file = host.fact.file(remote_filename)
# No remote file, always upload and user/group/mode if supplied
if not remote_file:
yield (local_file, remote_filename)
if user or group:
yield chown(remote_filename, user, group)
if mode:
yield chmod(remote_filename, mode)
# File exists, check sum and check user/group/mode if supplied
else:
local_sum = get_file_sha1(local_filename)
remote_sum = host.fact.sha1_file(remote_filename)
# Check sha1sum, upload if needed
if local_sum != remote_sum:
yield (local_file, remote_filename)
if user or group:
yield chown(remote_filename, user, group)
if mode:
yield chmod(remote_filename, mode)
else:
# Check mode
if mode and remote_file['mode'] != mode:
yield chmod(remote_filename, mode)
# Check user/group
if (
(user and remote_file['user'] != user)
or (group and remote_file['group'] != group)
):
yield chown(remote_filename, user, group) | [
"def",
"put",
"(",
"state",
",",
"host",
",",
"local_filename",
",",
"remote_filename",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"add_deploy_dir",
"=",
"True",
",",
")",
":",
"# Upload IO objects as-is",
"if",
"... | Copy a local file to the remote system.
+ local_filename: local filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files | [
"Copy",
"a",
"local",
"file",
"to",
"the",
"remote",
"system",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L297-L364 | train | 22,853 |
Fizzadar/pyinfra | pyinfra/modules/files.py | template | def template(
state, host, template_filename, remote_filename,
user=None, group=None, mode=None, **data
):
'''
Generate a template and write it to the remote system.
+ template_filename: local template filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
'''
if state.deploy_dir:
template_filename = path.join(state.deploy_dir, template_filename)
# Ensure host is always available inside templates
data['host'] = host
data['inventory'] = state.inventory
# Render and make file-like it's output
try:
output = get_template(template_filename).render(data)
except (TemplateSyntaxError, UndefinedError) as e:
_, _, trace = sys.exc_info()
# Jump through to the *second last* traceback, which contains the line number
# of the error within the in-memory Template object
while trace.tb_next:
if trace.tb_next.tb_next:
trace = trace.tb_next
else:
break
line_number = trace.tb_frame.f_lineno
# Quickly read the line in question and one above/below for nicer debugging
template_lines = open(template_filename, 'r').readlines()
template_lines = [line.strip() for line in template_lines]
relevant_lines = template_lines[max(line_number - 2, 0):line_number + 1]
raise OperationError('Error in template: {0} (L{1}): {2}\n...\n{3}\n...'.format(
template_filename, line_number, e, '\n'.join(relevant_lines),
))
output_file = six.StringIO(output)
# Set the template attribute for nicer debugging
output_file.template = template_filename
# Pass to the put function
yield put(
state, host,
output_file, remote_filename,
user=user, group=group, mode=mode,
add_deploy_dir=False,
) | python | def template(
state, host, template_filename, remote_filename,
user=None, group=None, mode=None, **data
):
'''
Generate a template and write it to the remote system.
+ template_filename: local template filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files
'''
if state.deploy_dir:
template_filename = path.join(state.deploy_dir, template_filename)
# Ensure host is always available inside templates
data['host'] = host
data['inventory'] = state.inventory
# Render and make file-like it's output
try:
output = get_template(template_filename).render(data)
except (TemplateSyntaxError, UndefinedError) as e:
_, _, trace = sys.exc_info()
# Jump through to the *second last* traceback, which contains the line number
# of the error within the in-memory Template object
while trace.tb_next:
if trace.tb_next.tb_next:
trace = trace.tb_next
else:
break
line_number = trace.tb_frame.f_lineno
# Quickly read the line in question and one above/below for nicer debugging
template_lines = open(template_filename, 'r').readlines()
template_lines = [line.strip() for line in template_lines]
relevant_lines = template_lines[max(line_number - 2, 0):line_number + 1]
raise OperationError('Error in template: {0} (L{1}): {2}\n...\n{3}\n...'.format(
template_filename, line_number, e, '\n'.join(relevant_lines),
))
output_file = six.StringIO(output)
# Set the template attribute for nicer debugging
output_file.template = template_filename
# Pass to the put function
yield put(
state, host,
output_file, remote_filename,
user=user, group=group, mode=mode,
add_deploy_dir=False,
) | [
"def",
"template",
"(",
"state",
",",
"host",
",",
"template_filename",
",",
"remote_filename",
",",
"user",
"=",
"None",
",",
"group",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"*",
"*",
"data",
")",
":",
"if",
"state",
".",
"deploy_dir",
":",
"te... | Generate a template and write it to the remote system.
+ template_filename: local template filename
+ remote_filename: remote filename
+ user: user to own the files
+ group: group to own the files
+ mode: permissions of the files | [
"Generate",
"a",
"template",
"and",
"write",
"it",
"to",
"the",
"remote",
"system",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/files.py#L368-L424 | train | 22,854 |
Fizzadar/pyinfra | pyinfra/modules/postgresql.py | sql | def sql(
state, host, sql,
database=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Execute arbitrary SQL against PostgreSQL.
+ sql: SQL command(s) to execute
+ database: optional database to execute against
+ postgresql_*: global module arguments, see above
'''
yield make_execute_psql_command(
sql,
database=database,
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
) | python | def sql(
state, host, sql,
database=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Execute arbitrary SQL against PostgreSQL.
+ sql: SQL command(s) to execute
+ database: optional database to execute against
+ postgresql_*: global module arguments, see above
'''
yield make_execute_psql_command(
sql,
database=database,
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
) | [
"def",
"sql",
"(",
"state",
",",
"host",
",",
"sql",
",",
"database",
"=",
"None",
",",
"# Details for speaking to PostgreSQL via `psql` CLI",
"postgresql_user",
"=",
"None",
",",
"postgresql_password",
"=",
"None",
",",
"postgresql_host",
"=",
"None",
",",
"postg... | Execute arbitrary SQL against PostgreSQL.
+ sql: SQL command(s) to execute
+ database: optional database to execute against
+ postgresql_*: global module arguments, see above | [
"Execute",
"arbitrary",
"SQL",
"against",
"PostgreSQL",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/postgresql.py#L22-L44 | train | 22,855 |
Fizzadar/pyinfra | pyinfra/modules/postgresql.py | dump | def dump(
state, host,
remote_filename, database=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ postgresql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_psql_command(
executable='pg_dump',
database=database,
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
), remote_filename) | python | def dump(
state, host,
remote_filename, database=None,
# Details for speaking to PostgreSQL via `psql` CLI
postgresql_user=None, postgresql_password=None,
postgresql_host=None, postgresql_port=None,
):
'''
Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ postgresql_*: global module arguments, see above
'''
yield '{0} > {1}'.format(make_psql_command(
executable='pg_dump',
database=database,
user=postgresql_user,
password=postgresql_password,
host=postgresql_host,
port=postgresql_port,
), remote_filename) | [
"def",
"dump",
"(",
"state",
",",
"host",
",",
"remote_filename",
",",
"database",
"=",
"None",
",",
"# Details for speaking to PostgreSQL via `psql` CLI",
"postgresql_user",
"=",
"None",
",",
"postgresql_password",
"=",
"None",
",",
"postgresql_host",
"=",
"None",
... | Dump a PostgreSQL database into a ``.sql`` file. Requires ``mysqldump``.
+ database: name of the database to dump
+ remote_filename: name of the file to dump the SQL to
+ postgresql_*: global module arguments, see above | [
"Dump",
"a",
"PostgreSQL",
"database",
"into",
"a",
".",
"sql",
"file",
".",
"Requires",
"mysqldump",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/postgresql.py#L201-L223 | train | 22,856 |
Fizzadar/pyinfra | pyinfra/api/facts.py | get_fact | def get_fact(state, host, name):
'''
Wrapper around ``get_facts`` returning facts for one host or a function
that does.
'''
# Expecting a function to return
if callable(getattr(FACTS[name], 'command', None)):
def wrapper(*args):
fact_data = get_facts(state, name, args=args, ensure_hosts=(host,))
return fact_data.get(host)
return wrapper
# Expecting the fact as a return value
else:
# Get the fact
fact_data = get_facts(state, name, ensure_hosts=(host,))
return fact_data.get(host) | python | def get_fact(state, host, name):
'''
Wrapper around ``get_facts`` returning facts for one host or a function
that does.
'''
# Expecting a function to return
if callable(getattr(FACTS[name], 'command', None)):
def wrapper(*args):
fact_data = get_facts(state, name, args=args, ensure_hosts=(host,))
return fact_data.get(host)
return wrapper
# Expecting the fact as a return value
else:
# Get the fact
fact_data = get_facts(state, name, ensure_hosts=(host,))
return fact_data.get(host) | [
"def",
"get_fact",
"(",
"state",
",",
"host",
",",
"name",
")",
":",
"# Expecting a function to return",
"if",
"callable",
"(",
"getattr",
"(",
"FACTS",
"[",
"name",
"]",
",",
"'command'",
",",
"None",
")",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
... | Wrapper around ``get_facts`` returning facts for one host or a function
that does. | [
"Wrapper",
"around",
"get_facts",
"returning",
"facts",
"for",
"one",
"host",
"or",
"a",
"function",
"that",
"does",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/facts.py#L249-L268 | train | 22,857 |
Fizzadar/pyinfra | pyinfra/modules/apt.py | key | def key(state, host, key=None, keyserver=None, keyid=None):
'''
Add apt gpg keys with ``apt-key``.
+ key: filename or URL
+ keyserver: URL of keyserver to fetch key from
+ keyid: key identifier when using keyserver
Note:
Always returns an add command, not state checking.
keyserver/id:
These must be provided together.
'''
if key:
# If URL, wget the key to stdout and pipe into apt-key, because the "adv"
# apt-key passes to gpg which doesn't always support https!
if urlparse(key).scheme:
yield 'wget -O- {0} | apt-key add -'.format(key)
else:
yield 'apt-key add {0}'.format(key)
if keyserver and keyid:
yield 'apt-key adv --keyserver {0} --recv-keys {1}'.format(keyserver, keyid) | python | def key(state, host, key=None, keyserver=None, keyid=None):
'''
Add apt gpg keys with ``apt-key``.
+ key: filename or URL
+ keyserver: URL of keyserver to fetch key from
+ keyid: key identifier when using keyserver
Note:
Always returns an add command, not state checking.
keyserver/id:
These must be provided together.
'''
if key:
# If URL, wget the key to stdout and pipe into apt-key, because the "adv"
# apt-key passes to gpg which doesn't always support https!
if urlparse(key).scheme:
yield 'wget -O- {0} | apt-key add -'.format(key)
else:
yield 'apt-key add {0}'.format(key)
if keyserver and keyid:
yield 'apt-key adv --keyserver {0} --recv-keys {1}'.format(keyserver, keyid) | [
"def",
"key",
"(",
"state",
",",
"host",
",",
"key",
"=",
"None",
",",
"keyserver",
"=",
"None",
",",
"keyid",
"=",
"None",
")",
":",
"if",
"key",
":",
"# If URL, wget the key to stdout and pipe into apt-key, because the \"adv\"",
"# apt-key passes to gpg which doesn'... | Add apt gpg keys with ``apt-key``.
+ key: filename or URL
+ keyserver: URL of keyserver to fetch key from
+ keyid: key identifier when using keyserver
Note:
Always returns an add command, not state checking.
keyserver/id:
These must be provided together. | [
"Add",
"apt",
"gpg",
"keys",
"with",
"apt",
"-",
"key",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/apt.py#L40-L64 | train | 22,858 |
Fizzadar/pyinfra | pyinfra/modules/apt.py | update | def update(state, host, cache_time=None, touch_periodic=False):
'''
Updates apt repos.
+ cache_time: cache updates for this many seconds
+ touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update
'''
# If cache_time check when apt was last updated, prevent updates if within time
if cache_time:
# Ubuntu provides this handy file
cache_info = host.fact.file(APT_UPDATE_FILENAME)
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from host.fact.date before comparison.
host_cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
if cache_info and cache_info['mtime'] and cache_info['mtime'] > host_cache_time:
return
yield 'apt-get update'
# Some apt systems (Debian) have the /var/lib/apt/periodic directory, but
# don't bother touching anything in there - so pyinfra does it, enabling
# cache_time to work.
if cache_time:
yield 'touch {0}'.format(APT_UPDATE_FILENAME) | python | def update(state, host, cache_time=None, touch_periodic=False):
'''
Updates apt repos.
+ cache_time: cache updates for this many seconds
+ touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update
'''
# If cache_time check when apt was last updated, prevent updates if within time
if cache_time:
# Ubuntu provides this handy file
cache_info = host.fact.file(APT_UPDATE_FILENAME)
# Time on files is not tz-aware, and will be the same tz as the server's time,
# so we can safely remove the tzinfo from host.fact.date before comparison.
host_cache_time = host.fact.date.replace(tzinfo=None) - timedelta(seconds=cache_time)
if cache_info and cache_info['mtime'] and cache_info['mtime'] > host_cache_time:
return
yield 'apt-get update'
# Some apt systems (Debian) have the /var/lib/apt/periodic directory, but
# don't bother touching anything in there - so pyinfra does it, enabling
# cache_time to work.
if cache_time:
yield 'touch {0}'.format(APT_UPDATE_FILENAME) | [
"def",
"update",
"(",
"state",
",",
"host",
",",
"cache_time",
"=",
"None",
",",
"touch_periodic",
"=",
"False",
")",
":",
"# If cache_time check when apt was last updated, prevent updates if within time",
"if",
"cache_time",
":",
"# Ubuntu provides this handy file",
"cache... | Updates apt repos.
+ cache_time: cache updates for this many seconds
+ touch_periodic: touch ``/var/lib/apt/periodic/update-success-stamp`` after update | [
"Updates",
"apt",
"repos",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/apt.py#L188-L213 | train | 22,859 |
Fizzadar/pyinfra | pyinfra/api/connectors/local.py | run_shell_command | def run_shell_command(
state, host, command,
get_pty=False, timeout=None, print_output=False,
**command_kwargs
):
'''
Execute a command on the local machine.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_command(command, **command_kwargs)
logger.debug('--> Running command on localhost: {0}'.format(command))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
# Iterate through outputs to get an exit status and generate desired list
# output, done in two greenlets so stdout isn't printed before stderr. Not
# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, process.stdout,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
)
stderr_reader = gevent.spawn(
read_buffer, process.stderr,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(
host.print_prefix, click.style(line, 'red'),
),
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets
# which did complete. So if both haven't completed, we kill them and fail
# with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
# Read the buffers into a list of lines
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('--> Waiting for exit status...')
process.wait()
# Close any open file descriptor
process.stdout.close()
logger.debug('--> Command exit status: {0}'.format(process.returncode))
return process.returncode == 0, stdout, stderr | python | def run_shell_command(
state, host, command,
get_pty=False, timeout=None, print_output=False,
**command_kwargs
):
'''
Execute a command on the local machine.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_command(command, **command_kwargs)
logger.debug('--> Running command on localhost: {0}'.format(command))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
# Iterate through outputs to get an exit status and generate desired list
# output, done in two greenlets so stdout isn't printed before stderr. Not
# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, process.stdout,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
)
stderr_reader = gevent.spawn(
read_buffer, process.stderr,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(
host.print_prefix, click.style(line, 'red'),
),
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets
# which did complete. So if both haven't completed, we kill them and fail
# with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
# Read the buffers into a list of lines
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('--> Waiting for exit status...')
process.wait()
# Close any open file descriptor
process.stdout.close()
logger.debug('--> Command exit status: {0}'.format(process.returncode))
return process.returncode == 0, stdout, stderr | [
"def",
"run_shell_command",
"(",
"state",
",",
"host",
",",
"command",
",",
"get_pty",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"print_output",
"=",
"False",
",",
"*",
"*",
"command_kwargs",
")",
":",
"command",
"=",
"make_command",
"(",
"command",
... | Execute a command on the local machine.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer. | [
"Execute",
"a",
"command",
"on",
"the",
"local",
"machine",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/connectors/local.py#L39-L110 | train | 22,860 |
Fizzadar/pyinfra | pyinfra/modules/init.py | upstart | def upstart(
state, host, name,
running=True, restarted=False, reloaded=False,
command=None, enabled=None,
):
'''
Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services.
'''
yield _handle_service_control(
name, host.fact.upstart_status,
'initctl {1} {0}',
running, restarted, reloaded, command,
)
# Upstart jobs are setup w/runlevels etc in their config files, so here we just check
# there's no override file.
if enabled is True:
yield files.file(
state, host,
'/etc/init/{0}.override'.format(name),
present=False,
)
# Set the override file to "manual" to disable automatic start
elif enabled is False:
yield 'echo "manual" > /etc/init/{0}.override'.format(name) | python | def upstart(
state, host, name,
running=True, restarted=False, reloaded=False,
command=None, enabled=None,
):
'''
Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services.
'''
yield _handle_service_control(
name, host.fact.upstart_status,
'initctl {1} {0}',
running, restarted, reloaded, command,
)
# Upstart jobs are setup w/runlevels etc in their config files, so here we just check
# there's no override file.
if enabled is True:
yield files.file(
state, host,
'/etc/init/{0}.override'.format(name),
present=False,
)
# Set the override file to "manual" to disable automatic start
elif enabled is False:
yield 'echo "manual" > /etc/init/{0}.override'.format(name) | [
"def",
"upstart",
"(",
"state",
",",
"host",
",",
"name",
",",
"running",
"=",
"True",
",",
"restarted",
"=",
"False",
",",
"reloaded",
"=",
"False",
",",
"command",
"=",
"None",
",",
"enabled",
"=",
"None",
",",
")",
":",
"yield",
"_handle_service_con... | Manage the state of upstart managed services.
+ name: name of the service to manage
+ running: whether the service should be running
+ restarted: whether the service should be restarted
+ reloaded: whether the service should be reloaded
+ command: custom command to pass like: ``/etc/rc.d/<name> <command>``
+ enabled: whether this service should be enabled/disabled on boot
Enabling/disabling services:
Upstart jobs define runlevels in their config files - as such there is no way to
edit/list these without fiddling with the config. So pyinfra simply manages the
existence of a ``/etc/init/<service>.override`` file, and sets its content to
"manual" to disable automatic start of services. | [
"Manage",
"the",
"state",
"of",
"upstart",
"managed",
"services",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/init.py#L209-L248 | train | 22,861 |
Fizzadar/pyinfra | pyinfra/modules/init.py | service | def service(
state, host,
*args, **kwargs
):
'''
Manage the state of services. This command checks for the presence of all the
init systems pyinfra can handle and executes the relevant operation. See init
system sepcific operation for arguments.
'''
if host.fact.which('systemctl'):
yield systemd(state, host, *args, **kwargs)
return
if host.fact.which('initctl'):
yield upstart(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/init.d'):
yield d(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/rc.d'):
yield rc(state, host, *args, **kwargs)
return
raise OperationError((
'No init system found '
'(no systemctl, initctl, /etc/init.d or /etc/rc.d found)'
)) | python | def service(
state, host,
*args, **kwargs
):
'''
Manage the state of services. This command checks for the presence of all the
init systems pyinfra can handle and executes the relevant operation. See init
system sepcific operation for arguments.
'''
if host.fact.which('systemctl'):
yield systemd(state, host, *args, **kwargs)
return
if host.fact.which('initctl'):
yield upstart(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/init.d'):
yield d(state, host, *args, **kwargs)
return
if host.fact.directory('/etc/rc.d'):
yield rc(state, host, *args, **kwargs)
return
raise OperationError((
'No init system found '
'(no systemctl, initctl, /etc/init.d or /etc/rc.d found)'
)) | [
"def",
"service",
"(",
"state",
",",
"host",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"host",
".",
"fact",
".",
"which",
"(",
"'systemctl'",
")",
":",
"yield",
"systemd",
"(",
"state",
",",
"host",
",",
"*",
"args",
",",
"*",
... | Manage the state of services. This command checks for the presence of all the
init systems pyinfra can handle and executes the relevant operation. See init
system sepcific operation for arguments. | [
"Manage",
"the",
"state",
"of",
"services",
".",
"This",
"command",
"checks",
"for",
"the",
"presence",
"of",
"all",
"the",
"init",
"systems",
"pyinfra",
"can",
"handle",
"and",
"executes",
"the",
"relevant",
"operation",
".",
"See",
"init",
"system",
"sepci... | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/modules/init.py#L321-L350 | train | 22,862 |
Fizzadar/pyinfra | pyinfra/api/connectors/ssh.py | connect | def connect(state, host, for_fact=None):
'''
Connect to a single host. Returns the SSH client if succesful. Stateless by
design so can be run in parallel.
'''
kwargs = _make_paramiko_kwargs(state, host)
logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))
# Hostname can be provided via SSH config (alias), data, or the hosts name
hostname = kwargs.pop(
'hostname',
host.data.ssh_hostname or host.name,
)
try:
# Create new client & connect to the host
client = SSHClient()
client.set_missing_host_key_policy(MissingHostKeyPolicy())
client.connect(hostname, **kwargs)
# Enable SSH forwarding
session = client.get_transport().open_session()
AgentRequestHandler(session)
# Log
log_message = '{0}{1}'.format(
host.print_prefix,
click.style('Connected', 'green'),
)
if for_fact:
log_message = '{0}{1}'.format(
log_message,
' (for {0} fact)'.format(for_fact),
)
logger.info(log_message)
return client
except AuthenticationException:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
continue
if key == 'pkey' and value:
auth_kwargs['key'] = host.data.ssh_key
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
_log_connect_error(host, 'Authentication error', auth_args)
except SSHException as e:
_log_connect_error(host, 'SSH error', e)
except gaierror:
_log_connect_error(host, 'Could not resolve hostname', hostname)
except socket_error as e:
_log_connect_error(host, 'Could not connect', e)
except EOFError as e:
_log_connect_error(host, 'EOF error', e) | python | def connect(state, host, for_fact=None):
'''
Connect to a single host. Returns the SSH client if succesful. Stateless by
design so can be run in parallel.
'''
kwargs = _make_paramiko_kwargs(state, host)
logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))
# Hostname can be provided via SSH config (alias), data, or the hosts name
hostname = kwargs.pop(
'hostname',
host.data.ssh_hostname or host.name,
)
try:
# Create new client & connect to the host
client = SSHClient()
client.set_missing_host_key_policy(MissingHostKeyPolicy())
client.connect(hostname, **kwargs)
# Enable SSH forwarding
session = client.get_transport().open_session()
AgentRequestHandler(session)
# Log
log_message = '{0}{1}'.format(
host.print_prefix,
click.style('Connected', 'green'),
)
if for_fact:
log_message = '{0}{1}'.format(
log_message,
' (for {0} fact)'.format(for_fact),
)
logger.info(log_message)
return client
except AuthenticationException:
auth_kwargs = {}
for key, value in kwargs.items():
if key in ('username', 'password'):
auth_kwargs[key] = value
continue
if key == 'pkey' and value:
auth_kwargs['key'] = host.data.ssh_key
auth_args = ', '.join(
'{0}={1}'.format(key, value)
for key, value in auth_kwargs.items()
)
_log_connect_error(host, 'Authentication error', auth_args)
except SSHException as e:
_log_connect_error(host, 'SSH error', e)
except gaierror:
_log_connect_error(host, 'Could not resolve hostname', hostname)
except socket_error as e:
_log_connect_error(host, 'Could not connect', e)
except EOFError as e:
_log_connect_error(host, 'EOF error', e) | [
"def",
"connect",
"(",
"state",
",",
"host",
",",
"for_fact",
"=",
"None",
")",
":",
"kwargs",
"=",
"_make_paramiko_kwargs",
"(",
"state",
",",
"host",
")",
"logger",
".",
"debug",
"(",
"'Connecting to: {0} ({1})'",
".",
"format",
"(",
"host",
".",
"name",... | Connect to a single host. Returns the SSH client if succesful. Stateless by
design so can be run in parallel. | [
"Connect",
"to",
"a",
"single",
"host",
".",
"Returns",
"the",
"SSH",
"client",
"if",
"succesful",
".",
"Stateless",
"by",
"design",
"so",
"can",
"be",
"run",
"in",
"parallel",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/connectors/ssh.py#L150-L219 | train | 22,863 |
Fizzadar/pyinfra | pyinfra/api/connectors/ssh.py | run_shell_command | def run_shell_command(
state, host, command,
get_pty=False, timeout=None, print_output=False,
**command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_command(command, **command_kwargs)
logger.debug('Running command on {0}: (pty={1}) {2}'.format(
host.name, get_pty, command,
))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
# Run it! Get stdout, stderr & the underlying channel
_, stdout_buffer, stderr_buffer = host.connection.exec_command(
command,
get_pty=get_pty,
)
channel = stdout_buffer.channel
# Iterate through outputs to get an exit status and generate desired list
# output, done in two greenlets so stdout isn't printed before stderr. Not
# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, stdout_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
)
stderr_reader = gevent.spawn(
read_buffer, stderr_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(
host.print_prefix, click.style(line, 'red'),
),
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets
# which did complete. So if both haven't completed, we kill them and fail
# with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
# Read the buffers into a list of lines
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('Waiting for exit status...')
exit_status = channel.recv_exit_status()
logger.debug('Command exit status: {0}'.format(exit_status))
return exit_status == 0, stdout, stderr | python | def run_shell_command(
state, host, command,
get_pty=False, timeout=None, print_output=False,
**command_kwargs
):
'''
Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer.
'''
command = make_command(command, **command_kwargs)
logger.debug('Running command on {0}: (pty={1}) {2}'.format(
host.name, get_pty, command,
))
if print_output:
print('{0}>>> {1}'.format(host.print_prefix, command))
# Run it! Get stdout, stderr & the underlying channel
_, stdout_buffer, stderr_buffer = host.connection.exec_command(
command,
get_pty=get_pty,
)
channel = stdout_buffer.channel
# Iterate through outputs to get an exit status and generate desired list
# output, done in two greenlets so stdout isn't printed before stderr. Not
# attached to state.pool to avoid blocking it with 2x n-hosts greenlets.
stdout_reader = gevent.spawn(
read_buffer, stdout_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(host.print_prefix, line),
)
stderr_reader = gevent.spawn(
read_buffer, stderr_buffer,
print_output=print_output,
print_func=lambda line: '{0}{1}'.format(
host.print_prefix, click.style(line, 'red'),
),
)
# Wait on output, with our timeout (or None)
greenlets = gevent.wait((stdout_reader, stderr_reader), timeout=timeout)
# Timeout doesn't raise an exception, but gevent.wait returns the greenlets
# which did complete. So if both haven't completed, we kill them and fail
# with a timeout.
if len(greenlets) != 2:
stdout_reader.kill()
stderr_reader.kill()
raise timeout_error()
# Read the buffers into a list of lines
stdout = stdout_reader.get()
stderr = stderr_reader.get()
logger.debug('Waiting for exit status...')
exit_status = channel.recv_exit_status()
logger.debug('Command exit status: {0}'.format(exit_status))
return exit_status == 0, stdout, stderr | [
"def",
"run_shell_command",
"(",
"state",
",",
"host",
",",
"command",
",",
"get_pty",
"=",
"False",
",",
"timeout",
"=",
"None",
",",
"print_output",
"=",
"False",
",",
"*",
"*",
"command_kwargs",
")",
":",
"command",
"=",
"make_command",
"(",
"command",
... | Execute a command on the specified host.
Args:
state (``pyinfra.api.State`` obj): state object for this command
hostname (string): hostname of the target
command (string): actual command to execute
sudo (boolean): whether to wrap the command with sudo
sudo_user (string): user to sudo to
get_pty (boolean): whether to get a PTY before executing the command
env (dict): envrionment variables to set
timeout (int): timeout for this command to complete before erroring
Returns:
tuple: (exit_code, stdout, stderr)
stdout and stderr are both lists of strings from each buffer. | [
"Execute",
"a",
"command",
"on",
"the",
"specified",
"host",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/connectors/ssh.py#L222-L298 | train | 22,864 |
Fizzadar/pyinfra | pyinfra/api/connectors/ssh.py | put_file | def put_file(
state, host, filename_or_io, remote_filename,
sudo=False, sudo_user=None, su_user=None, print_output=False,
):
'''
Upload file-ios to the specified host using SFTP. Supports uploading files
with sudo by uploading to a temporary directory then moving & chowning.
'''
# sudo/su are a little more complicated, as you can only sftp with the SSH
# user connected, so upload to tmp and copy/chown w/sudo and/or su_user
if sudo or su_user:
# Get temp file location
temp_file = state.get_temp_filename(remote_filename)
_put_file(host, filename_or_io, temp_file)
if print_output:
print('{0}file uploaded: {1}'.format(host.print_prefix, remote_filename))
# Execute run_shell_command w/sudo and/or su_user
command = 'mv {0} {1}'.format(temp_file, remote_filename)
# Move it to the su_user if present
if su_user:
command = '{0} && chown {1} {2}'.format(command, su_user, remote_filename)
# Otherwise any sudo_user
elif sudo_user:
command = '{0} && chown {1} {2}'.format(command, sudo_user, remote_filename)
status, _, stderr = run_shell_command(
state, host, command,
sudo=sudo, sudo_user=sudo_user, su_user=su_user,
print_output=print_output,
)
if status is False:
logger.error('File error: {0}'.format('\n'.join(stderr)))
return False
# No sudo and no su_user, so just upload it!
else:
_put_file(host, filename_or_io, remote_filename)
if print_output:
print('{0}file uploaded: {1}'.format(host.print_prefix, remote_filename))
return True | python | def put_file(
state, host, filename_or_io, remote_filename,
sudo=False, sudo_user=None, su_user=None, print_output=False,
):
'''
Upload file-ios to the specified host using SFTP. Supports uploading files
with sudo by uploading to a temporary directory then moving & chowning.
'''
# sudo/su are a little more complicated, as you can only sftp with the SSH
# user connected, so upload to tmp and copy/chown w/sudo and/or su_user
if sudo or su_user:
# Get temp file location
temp_file = state.get_temp_filename(remote_filename)
_put_file(host, filename_or_io, temp_file)
if print_output:
print('{0}file uploaded: {1}'.format(host.print_prefix, remote_filename))
# Execute run_shell_command w/sudo and/or su_user
command = 'mv {0} {1}'.format(temp_file, remote_filename)
# Move it to the su_user if present
if su_user:
command = '{0} && chown {1} {2}'.format(command, su_user, remote_filename)
# Otherwise any sudo_user
elif sudo_user:
command = '{0} && chown {1} {2}'.format(command, sudo_user, remote_filename)
status, _, stderr = run_shell_command(
state, host, command,
sudo=sudo, sudo_user=sudo_user, su_user=su_user,
print_output=print_output,
)
if status is False:
logger.error('File error: {0}'.format('\n'.join(stderr)))
return False
# No sudo and no su_user, so just upload it!
else:
_put_file(host, filename_or_io, remote_filename)
if print_output:
print('{0}file uploaded: {1}'.format(host.print_prefix, remote_filename))
return True | [
"def",
"put_file",
"(",
"state",
",",
"host",
",",
"filename_or_io",
",",
"remote_filename",
",",
"sudo",
"=",
"False",
",",
"sudo_user",
"=",
"None",
",",
"su_user",
"=",
"None",
",",
"print_output",
"=",
"False",
",",
")",
":",
"# sudo/su are a little more... | Upload file-ios to the specified host using SFTP. Supports uploading files
with sudo by uploading to a temporary directory then moving & chowning. | [
"Upload",
"file",
"-",
"ios",
"to",
"the",
"specified",
"host",
"using",
"SFTP",
".",
"Supports",
"uploading",
"files",
"with",
"sudo",
"by",
"uploading",
"to",
"a",
"temporary",
"directory",
"then",
"moving",
"&",
"chowning",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/connectors/ssh.py#L331-L378 | train | 22,865 |
Fizzadar/pyinfra | pyinfra/api/state.py | State.deploy | def deploy(self, name, kwargs, data, line_number, in_deploy=True):
'''
Wraps a group of operations as a deploy, this should not be used
directly, instead use ``pyinfra.api.deploy.deploy``.
'''
# Handle nested deploy names
if self.deploy_name:
name = _make_name(self.deploy_name, name)
# Store the previous values
old_in_deploy = self.in_deploy
old_deploy_name = self.deploy_name
old_deploy_kwargs = self.deploy_kwargs
old_deploy_data = self.deploy_data
old_deploy_line_numbers = self.deploy_line_numbers
self.in_deploy = in_deploy
# Limit the new hosts to a subset of the old hosts if they existed
if (
old_deploy_kwargs
and old_deploy_kwargs.get('hosts') is not None
):
# If we have hosts - subset them based on the old hosts
if 'hosts' in kwargs:
kwargs['hosts'] = [
host for host in kwargs['hosts']
if host in old_deploy_kwargs['hosts']
]
# Otherwise simply carry the previous hosts
else:
kwargs['hosts'] = old_deploy_kwargs['hosts']
# Make new line numbers - note convert from and back to tuple to avoid
# keeping deploy_line_numbers mutable.
new_line_numbers = list(self.deploy_line_numbers or [])
new_line_numbers.append(line_number)
new_line_numbers = tuple(new_line_numbers)
# Set the new values
self.deploy_name = name
self.deploy_kwargs = kwargs
self.deploy_data = data
self.deploy_line_numbers = new_line_numbers
logger.debug('Starting deploy {0} (args={1}, data={2})'.format(
name, kwargs, data,
))
yield
# Restore the previous values
self.in_deploy = old_in_deploy
self.deploy_name = old_deploy_name
self.deploy_kwargs = old_deploy_kwargs
self.deploy_data = old_deploy_data
self.deploy_line_numbers = old_deploy_line_numbers
logger.debug('Reset deploy to {0} (args={1}, data={2})'.format(
old_deploy_name, old_deploy_kwargs, old_deploy_data,
)) | python | def deploy(self, name, kwargs, data, line_number, in_deploy=True):
'''
Wraps a group of operations as a deploy, this should not be used
directly, instead use ``pyinfra.api.deploy.deploy``.
'''
# Handle nested deploy names
if self.deploy_name:
name = _make_name(self.deploy_name, name)
# Store the previous values
old_in_deploy = self.in_deploy
old_deploy_name = self.deploy_name
old_deploy_kwargs = self.deploy_kwargs
old_deploy_data = self.deploy_data
old_deploy_line_numbers = self.deploy_line_numbers
self.in_deploy = in_deploy
# Limit the new hosts to a subset of the old hosts if they existed
if (
old_deploy_kwargs
and old_deploy_kwargs.get('hosts') is not None
):
# If we have hosts - subset them based on the old hosts
if 'hosts' in kwargs:
kwargs['hosts'] = [
host for host in kwargs['hosts']
if host in old_deploy_kwargs['hosts']
]
# Otherwise simply carry the previous hosts
else:
kwargs['hosts'] = old_deploy_kwargs['hosts']
# Make new line numbers - note convert from and back to tuple to avoid
# keeping deploy_line_numbers mutable.
new_line_numbers = list(self.deploy_line_numbers or [])
new_line_numbers.append(line_number)
new_line_numbers = tuple(new_line_numbers)
# Set the new values
self.deploy_name = name
self.deploy_kwargs = kwargs
self.deploy_data = data
self.deploy_line_numbers = new_line_numbers
logger.debug('Starting deploy {0} (args={1}, data={2})'.format(
name, kwargs, data,
))
yield
# Restore the previous values
self.in_deploy = old_in_deploy
self.deploy_name = old_deploy_name
self.deploy_kwargs = old_deploy_kwargs
self.deploy_data = old_deploy_data
self.deploy_line_numbers = old_deploy_line_numbers
logger.debug('Reset deploy to {0} (args={1}, data={2})'.format(
old_deploy_name, old_deploy_kwargs, old_deploy_data,
)) | [
"def",
"deploy",
"(",
"self",
",",
"name",
",",
"kwargs",
",",
"data",
",",
"line_number",
",",
"in_deploy",
"=",
"True",
")",
":",
"# Handle nested deploy names",
"if",
"self",
".",
"deploy_name",
":",
"name",
"=",
"_make_name",
"(",
"self",
".",
"deploy_... | Wraps a group of operations as a deploy, this should not be used
directly, instead use ``pyinfra.api.deploy.deploy``. | [
"Wraps",
"a",
"group",
"of",
"operations",
"as",
"a",
"deploy",
"this",
"should",
"not",
"be",
"used",
"directly",
"instead",
"use",
"pyinfra",
".",
"api",
".",
"deploy",
".",
"deploy",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L275-L334 | train | 22,866 |
Fizzadar/pyinfra | pyinfra/api/state.py | State.activate_host | def activate_host(self, host):
'''
Flag a host as active.
'''
logger.debug('Activating host: {0}'.format(host))
# Add to *both* activated and active - active will reduce as hosts fail
# but connected will not, enabling us to track failed %.
self.activated_hosts.add(host)
self.active_hosts.add(host) | python | def activate_host(self, host):
'''
Flag a host as active.
'''
logger.debug('Activating host: {0}'.format(host))
# Add to *both* activated and active - active will reduce as hosts fail
# but connected will not, enabling us to track failed %.
self.activated_hosts.add(host)
self.active_hosts.add(host) | [
"def",
"activate_host",
"(",
"self",
",",
"host",
")",
":",
"logger",
".",
"debug",
"(",
"'Activating host: {0}'",
".",
"format",
"(",
"host",
")",
")",
"# Add to *both* activated and active - active will reduce as hosts fail",
"# but connected will not, enabling us to track ... | Flag a host as active. | [
"Flag",
"a",
"host",
"as",
"active",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L345-L355 | train | 22,867 |
Fizzadar/pyinfra | pyinfra/api/state.py | State.fail_hosts | def fail_hosts(self, hosts_to_fail, activated_count=None):
'''
Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.
'''
if not hosts_to_fail:
return
activated_count = activated_count or len(self.activated_hosts)
logger.debug('Failing hosts: {0}'.format(', '.join(
(host.name for host in hosts_to_fail),
)))
# Remove the failed hosts from the inventory
self.active_hosts -= hosts_to_fail
# Check we're not above the fail percent
active_hosts = self.active_hosts
# No hosts left!
if not active_hosts:
raise PyinfraError('No hosts remaining!')
if self.config.FAIL_PERCENT is not None:
percent_failed = (
1 - len(active_hosts) / activated_count
) * 100
if percent_failed > self.config.FAIL_PERCENT:
raise PyinfraError('Over {0}% of hosts failed ({1}%)'.format(
self.config.FAIL_PERCENT,
int(round(percent_failed)),
)) | python | def fail_hosts(self, hosts_to_fail, activated_count=None):
'''
Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``.
'''
if not hosts_to_fail:
return
activated_count = activated_count or len(self.activated_hosts)
logger.debug('Failing hosts: {0}'.format(', '.join(
(host.name for host in hosts_to_fail),
)))
# Remove the failed hosts from the inventory
self.active_hosts -= hosts_to_fail
# Check we're not above the fail percent
active_hosts = self.active_hosts
# No hosts left!
if not active_hosts:
raise PyinfraError('No hosts remaining!')
if self.config.FAIL_PERCENT is not None:
percent_failed = (
1 - len(active_hosts) / activated_count
) * 100
if percent_failed > self.config.FAIL_PERCENT:
raise PyinfraError('Over {0}% of hosts failed ({1}%)'.format(
self.config.FAIL_PERCENT,
int(round(percent_failed)),
)) | [
"def",
"fail_hosts",
"(",
"self",
",",
"hosts_to_fail",
",",
"activated_count",
"=",
"None",
")",
":",
"if",
"not",
"hosts_to_fail",
":",
"return",
"activated_count",
"=",
"activated_count",
"or",
"len",
"(",
"self",
".",
"activated_hosts",
")",
"logger",
".",... | Flag a ``set`` of hosts as failed, error for ``config.FAIL_PERCENT``. | [
"Flag",
"a",
"set",
"of",
"hosts",
"as",
"failed",
"error",
"for",
"config",
".",
"FAIL_PERCENT",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L365-L398 | train | 22,868 |
Fizzadar/pyinfra | pyinfra/api/state.py | State.is_host_in_limit | def is_host_in_limit(self, host):
'''
Returns a boolean indicating if the host is within the current state limit.
'''
limit_hosts = self.limit_hosts
if not isinstance(limit_hosts, list):
return True
return host in limit_hosts | python | def is_host_in_limit(self, host):
'''
Returns a boolean indicating if the host is within the current state limit.
'''
limit_hosts = self.limit_hosts
if not isinstance(limit_hosts, list):
return True
return host in limit_hosts | [
"def",
"is_host_in_limit",
"(",
"self",
",",
"host",
")",
":",
"limit_hosts",
"=",
"self",
".",
"limit_hosts",
"if",
"not",
"isinstance",
"(",
"limit_hosts",
",",
"list",
")",
":",
"return",
"True",
"return",
"host",
"in",
"limit_hosts"
] | Returns a boolean indicating if the host is within the current state limit. | [
"Returns",
"a",
"boolean",
"indicating",
"if",
"the",
"host",
"is",
"within",
"the",
"current",
"state",
"limit",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L400-L409 | train | 22,869 |
Fizzadar/pyinfra | pyinfra/api/state.py | State.get_temp_filename | def get_temp_filename(self, hash_key=None):
'''
Generate a temporary filename for this deploy.
'''
if not hash_key:
hash_key = six.text_type(uuid4())
temp_filename = '{0}/{1}'.format(
self.config.TEMP_DIR, sha1_hash(hash_key),
)
return temp_filename | python | def get_temp_filename(self, hash_key=None):
'''
Generate a temporary filename for this deploy.
'''
if not hash_key:
hash_key = six.text_type(uuid4())
temp_filename = '{0}/{1}'.format(
self.config.TEMP_DIR, sha1_hash(hash_key),
)
return temp_filename | [
"def",
"get_temp_filename",
"(",
"self",
",",
"hash_key",
"=",
"None",
")",
":",
"if",
"not",
"hash_key",
":",
"hash_key",
"=",
"six",
".",
"text_type",
"(",
"uuid4",
"(",
")",
")",
"temp_filename",
"=",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"c... | Generate a temporary filename for this deploy. | [
"Generate",
"a",
"temporary",
"filename",
"for",
"this",
"deploy",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/state.py#L411-L423 | train | 22,870 |
Fizzadar/pyinfra | pyinfra/api/operations.py | _run_server_ops | def _run_server_ops(state, host, progress=None):
'''
Run all ops for a single server.
'''
logger.debug('Running all ops on {0}'.format(host))
for op_hash in state.get_op_order():
op_meta = state.op_meta[op_hash]
logger.info('--> {0} {1} on {2}'.format(
click.style('--> Starting operation:', 'blue'),
click.style(', '.join(op_meta['names']), bold=True),
click.style(host.name, bold=True),
))
result = _run_server_op(state, host, op_hash)
# Trigger CLI progress if provided
if progress:
progress((host, op_hash))
if result is False:
raise PyinfraError('Error in operation {0} on {1}'.format(
', '.join(op_meta['names']), host,
))
if pyinfra.is_cli:
print() | python | def _run_server_ops(state, host, progress=None):
'''
Run all ops for a single server.
'''
logger.debug('Running all ops on {0}'.format(host))
for op_hash in state.get_op_order():
op_meta = state.op_meta[op_hash]
logger.info('--> {0} {1} on {2}'.format(
click.style('--> Starting operation:', 'blue'),
click.style(', '.join(op_meta['names']), bold=True),
click.style(host.name, bold=True),
))
result = _run_server_op(state, host, op_hash)
# Trigger CLI progress if provided
if progress:
progress((host, op_hash))
if result is False:
raise PyinfraError('Error in operation {0} on {1}'.format(
', '.join(op_meta['names']), host,
))
if pyinfra.is_cli:
print() | [
"def",
"_run_server_ops",
"(",
"state",
",",
"host",
",",
"progress",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Running all ops on {0}'",
".",
"format",
"(",
"host",
")",
")",
"for",
"op_hash",
"in",
"state",
".",
"get_op_order",
"(",
")",
":... | Run all ops for a single server. | [
"Run",
"all",
"ops",
"for",
"a",
"single",
"server",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operations.py#L216-L244 | train | 22,871 |
Fizzadar/pyinfra | pyinfra/api/operations.py | _run_serial_ops | def _run_serial_ops(state):
'''
Run all ops for all servers, one server at a time.
'''
for host in list(state.inventory):
host_operations = product([host], state.get_op_order())
with progress_spinner(host_operations) as progress:
try:
_run_server_ops(
state, host,
progress=progress,
)
except PyinfraError:
state.fail_hosts({host}) | python | def _run_serial_ops(state):
'''
Run all ops for all servers, one server at a time.
'''
for host in list(state.inventory):
host_operations = product([host], state.get_op_order())
with progress_spinner(host_operations) as progress:
try:
_run_server_ops(
state, host,
progress=progress,
)
except PyinfraError:
state.fail_hosts({host}) | [
"def",
"_run_serial_ops",
"(",
"state",
")",
":",
"for",
"host",
"in",
"list",
"(",
"state",
".",
"inventory",
")",
":",
"host_operations",
"=",
"product",
"(",
"[",
"host",
"]",
",",
"state",
".",
"get_op_order",
"(",
")",
")",
"with",
"progress_spinner... | Run all ops for all servers, one server at a time. | [
"Run",
"all",
"ops",
"for",
"all",
"servers",
"one",
"server",
"at",
"a",
"time",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operations.py#L247-L261 | train | 22,872 |
Fizzadar/pyinfra | pyinfra/api/operations.py | _run_no_wait_ops | def _run_no_wait_ops(state):
'''
Run all ops for all servers at once.
'''
hosts_operations = product(state.inventory, state.get_op_order())
with progress_spinner(hosts_operations) as progress:
# Spawn greenlet for each host to run *all* ops
greenlets = [
state.pool.spawn(
_run_server_ops, state, host,
progress=progress,
)
for host in state.inventory
]
gevent.joinall(greenlets) | python | def _run_no_wait_ops(state):
'''
Run all ops for all servers at once.
'''
hosts_operations = product(state.inventory, state.get_op_order())
with progress_spinner(hosts_operations) as progress:
# Spawn greenlet for each host to run *all* ops
greenlets = [
state.pool.spawn(
_run_server_ops, state, host,
progress=progress,
)
for host in state.inventory
]
gevent.joinall(greenlets) | [
"def",
"_run_no_wait_ops",
"(",
"state",
")",
":",
"hosts_operations",
"=",
"product",
"(",
"state",
".",
"inventory",
",",
"state",
".",
"get_op_order",
"(",
")",
")",
"with",
"progress_spinner",
"(",
"hosts_operations",
")",
"as",
"progress",
":",
"# Spawn g... | Run all ops for all servers at once. | [
"Run",
"all",
"ops",
"for",
"all",
"servers",
"at",
"once",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operations.py#L264-L279 | train | 22,873 |
Fizzadar/pyinfra | pyinfra/api/operations.py | _run_single_op | def _run_single_op(state, op_hash):
'''
Run a single operation for all servers. Can be configured to run in serial.
'''
op_meta = state.op_meta[op_hash]
op_types = []
if op_meta['serial']:
op_types.append('serial')
if op_meta['run_once']:
op_types.append('run once')
logger.info('{0} {1} {2}'.format(
click.style('--> Starting{0}operation:'.format(
' {0} '.format(', '.join(op_types)) if op_types else ' ',
), 'blue'),
click.style(', '.join(op_meta['names']), bold=True),
tuple(op_meta['args']) if op_meta['args'] else '',
))
failed_hosts = set()
if op_meta['serial']:
with progress_spinner(state.inventory) as progress:
# For each host, run the op
for host in state.inventory:
result = _run_server_op(state, host, op_hash)
progress(host)
if not result:
failed_hosts.add(host)
else:
# Start with the whole inventory in one batch
batches = [state.inventory]
# If parallel set break up the inventory into a series of batches
if op_meta['parallel']:
parallel = op_meta['parallel']
hosts = list(state.inventory)
batches = [
hosts[i:i + parallel]
for i in range(0, len(hosts), parallel)
]
for batch in batches:
with progress_spinner(batch) as progress:
# Spawn greenlet for each host
greenlet_to_host = {
state.pool.spawn(_run_server_op, state, host, op_hash): host
for host in batch
}
# Trigger CLI progress as hosts complete if provided
for greenlet in gevent.iwait(greenlet_to_host.keys()):
host = greenlet_to_host[greenlet]
progress(host)
# Get all the results
for greenlet, host in six.iteritems(greenlet_to_host):
if not greenlet.get():
failed_hosts.add(host)
# Now all the batches/hosts are complete, fail any failures
if not op_meta['ignore_errors']:
state.fail_hosts(failed_hosts)
if pyinfra.is_cli:
print() | python | def _run_single_op(state, op_hash):
'''
Run a single operation for all servers. Can be configured to run in serial.
'''
op_meta = state.op_meta[op_hash]
op_types = []
if op_meta['serial']:
op_types.append('serial')
if op_meta['run_once']:
op_types.append('run once')
logger.info('{0} {1} {2}'.format(
click.style('--> Starting{0}operation:'.format(
' {0} '.format(', '.join(op_types)) if op_types else ' ',
), 'blue'),
click.style(', '.join(op_meta['names']), bold=True),
tuple(op_meta['args']) if op_meta['args'] else '',
))
failed_hosts = set()
if op_meta['serial']:
with progress_spinner(state.inventory) as progress:
# For each host, run the op
for host in state.inventory:
result = _run_server_op(state, host, op_hash)
progress(host)
if not result:
failed_hosts.add(host)
else:
# Start with the whole inventory in one batch
batches = [state.inventory]
# If parallel set break up the inventory into a series of batches
if op_meta['parallel']:
parallel = op_meta['parallel']
hosts = list(state.inventory)
batches = [
hosts[i:i + parallel]
for i in range(0, len(hosts), parallel)
]
for batch in batches:
with progress_spinner(batch) as progress:
# Spawn greenlet for each host
greenlet_to_host = {
state.pool.spawn(_run_server_op, state, host, op_hash): host
for host in batch
}
# Trigger CLI progress as hosts complete if provided
for greenlet in gevent.iwait(greenlet_to_host.keys()):
host = greenlet_to_host[greenlet]
progress(host)
# Get all the results
for greenlet, host in six.iteritems(greenlet_to_host):
if not greenlet.get():
failed_hosts.add(host)
# Now all the batches/hosts are complete, fail any failures
if not op_meta['ignore_errors']:
state.fail_hosts(failed_hosts)
if pyinfra.is_cli:
print() | [
"def",
"_run_single_op",
"(",
"state",
",",
"op_hash",
")",
":",
"op_meta",
"=",
"state",
".",
"op_meta",
"[",
"op_hash",
"]",
"op_types",
"=",
"[",
"]",
"if",
"op_meta",
"[",
"'serial'",
"]",
":",
"op_types",
".",
"append",
"(",
"'serial'",
")",
"if",... | Run a single operation for all servers. Can be configured to run in serial. | [
"Run",
"a",
"single",
"operation",
"for",
"all",
"servers",
".",
"Can",
"be",
"configured",
"to",
"run",
"in",
"serial",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operations.py#L282-L354 | train | 22,874 |
Fizzadar/pyinfra | pyinfra/api/operations.py | run_ops | def run_ops(state, serial=False, no_wait=False):
'''
Runs all operations across all servers in a configurable manner.
Args:
state (``pyinfra.api.State`` obj): the deploy state to execute
serial (boolean): whether to run operations host by host
no_wait (boolean): whether to wait for all hosts between operations
'''
# Flag state as deploy in process
state.deploying = True
# Run all ops, but server by server
if serial:
_run_serial_ops(state)
# Run all the ops on each server in parallel (not waiting at each operation)
elif no_wait:
_run_no_wait_ops(state)
# Default: run all ops in order, waiting at each for all servers to complete
for op_hash in state.get_op_order():
_run_single_op(state, op_hash) | python | def run_ops(state, serial=False, no_wait=False):
'''
Runs all operations across all servers in a configurable manner.
Args:
state (``pyinfra.api.State`` obj): the deploy state to execute
serial (boolean): whether to run operations host by host
no_wait (boolean): whether to wait for all hosts between operations
'''
# Flag state as deploy in process
state.deploying = True
# Run all ops, but server by server
if serial:
_run_serial_ops(state)
# Run all the ops on each server in parallel (not waiting at each operation)
elif no_wait:
_run_no_wait_ops(state)
# Default: run all ops in order, waiting at each for all servers to complete
for op_hash in state.get_op_order():
_run_single_op(state, op_hash) | [
"def",
"run_ops",
"(",
"state",
",",
"serial",
"=",
"False",
",",
"no_wait",
"=",
"False",
")",
":",
"# Flag state as deploy in process",
"state",
".",
"deploying",
"=",
"True",
"# Run all ops, but server by server",
"if",
"serial",
":",
"_run_serial_ops",
"(",
"s... | Runs all operations across all servers in a configurable manner.
Args:
state (``pyinfra.api.State`` obj): the deploy state to execute
serial (boolean): whether to run operations host by host
no_wait (boolean): whether to wait for all hosts between operations | [
"Runs",
"all",
"operations",
"across",
"all",
"servers",
"in",
"a",
"configurable",
"manner",
"."
] | 006f751f7db2e07d32522c0285160783de2feb79 | https://github.com/Fizzadar/pyinfra/blob/006f751f7db2e07d32522c0285160783de2feb79/pyinfra/api/operations.py#L357-L380 | train | 22,875 |
eruvanos/openbrokerapi | openbrokerapi/api.py | serve | def serve(service_brokers: Union[List[ServiceBroker], ServiceBroker],
credentials: Union[List[BrokerCredentials], BrokerCredentials, None],
logger: logging.Logger = logging.root,
port=5000,
debug=False):
"""
Starts flask with the given brokers.
You can provide a list or just one ServiceBroker
:param service_brokers: ServicesBroker for services to provide
:param credentials: Username and password that will be required to communicate with service broker
:param logger: Used for api logs. This will not influence Flasks logging behavior
:param port: Port
:param debug: Enables debugging in flask app
"""
from gevent.pywsgi import WSGIServer
from flask import Flask
app = Flask(__name__)
app.debug = debug
blueprint = get_blueprint(service_brokers, credentials, logger)
logger.debug("Register openbrokerapi blueprint")
app.register_blueprint(blueprint)
logger.info("Start Flask on 0.0.0.0:%s" % port)
http_server = WSGIServer(('0.0.0.0', port), app)
http_server.serve_forever() | python | def serve(service_brokers: Union[List[ServiceBroker], ServiceBroker],
credentials: Union[List[BrokerCredentials], BrokerCredentials, None],
logger: logging.Logger = logging.root,
port=5000,
debug=False):
"""
Starts flask with the given brokers.
You can provide a list or just one ServiceBroker
:param service_brokers: ServicesBroker for services to provide
:param credentials: Username and password that will be required to communicate with service broker
:param logger: Used for api logs. This will not influence Flasks logging behavior
:param port: Port
:param debug: Enables debugging in flask app
"""
from gevent.pywsgi import WSGIServer
from flask import Flask
app = Flask(__name__)
app.debug = debug
blueprint = get_blueprint(service_brokers, credentials, logger)
logger.debug("Register openbrokerapi blueprint")
app.register_blueprint(blueprint)
logger.info("Start Flask on 0.0.0.0:%s" % port)
http_server = WSGIServer(('0.0.0.0', port), app)
http_server.serve_forever() | [
"def",
"serve",
"(",
"service_brokers",
":",
"Union",
"[",
"List",
"[",
"ServiceBroker",
"]",
",",
"ServiceBroker",
"]",
",",
"credentials",
":",
"Union",
"[",
"List",
"[",
"BrokerCredentials",
"]",
",",
"BrokerCredentials",
",",
"None",
"]",
",",
"logger",
... | Starts flask with the given brokers.
You can provide a list or just one ServiceBroker
:param service_brokers: ServicesBroker for services to provide
:param credentials: Username and password that will be required to communicate with service broker
:param logger: Used for api logs. This will not influence Flasks logging behavior
:param port: Port
:param debug: Enables debugging in flask app | [
"Starts",
"flask",
"with",
"the",
"given",
"brokers",
".",
"You",
"can",
"provide",
"a",
"list",
"or",
"just",
"one",
"ServiceBroker"
] | 29d514e5932f2eac27e03995dd41c8cecf40bb10 | https://github.com/eruvanos/openbrokerapi/blob/29d514e5932f2eac27e03995dd41c8cecf40bb10/openbrokerapi/api.py#L320-L348 | train | 22,876 |
romana/multi-ping | multiping/__init__.py | multi_ping | def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False):
"""
Combine send and receive measurement into single function.
This offers a retry mechanism: Overall timeout time is divided by
number of retries. Additional ICMPecho packets are sent to those
addresses from which we have not received answers, yet.
The retry mechanism is useful, because individual ICMP packets may get
lost.
If 'retry' is set to 0 then only a single packet is sent to each
address.
If 'ignore_lookup_errors' is set then any issues with resolving target
names or looking up their address information will silently be ignored.
Those targets simply appear in the 'no_results' return list.
"""
retry = int(retry)
if retry < 0:
retry = 0
timeout = float(timeout)
if timeout < 0.1:
raise MultiPingError("Timeout < 0.1 seconds not allowed")
retry_timeout = float(timeout) / (retry + 1)
if retry_timeout < 0.1:
raise MultiPingError("Time between ping retries < 0.1 seconds")
mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors)
results = {}
retry_count = 0
while retry_count <= retry:
# Send a batch of pings
mp.send()
single_results, no_results = mp.receive(retry_timeout)
# Add the results from the last sending of pings to the overall results
results.update(single_results)
if not no_results:
# No addresses left? We are done.
break
retry_count += 1
return results, no_results | python | def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False):
"""
Combine send and receive measurement into single function.
This offers a retry mechanism: Overall timeout time is divided by
number of retries. Additional ICMPecho packets are sent to those
addresses from which we have not received answers, yet.
The retry mechanism is useful, because individual ICMP packets may get
lost.
If 'retry' is set to 0 then only a single packet is sent to each
address.
If 'ignore_lookup_errors' is set then any issues with resolving target
names or looking up their address information will silently be ignored.
Those targets simply appear in the 'no_results' return list.
"""
retry = int(retry)
if retry < 0:
retry = 0
timeout = float(timeout)
if timeout < 0.1:
raise MultiPingError("Timeout < 0.1 seconds not allowed")
retry_timeout = float(timeout) / (retry + 1)
if retry_timeout < 0.1:
raise MultiPingError("Time between ping retries < 0.1 seconds")
mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors)
results = {}
retry_count = 0
while retry_count <= retry:
# Send a batch of pings
mp.send()
single_results, no_results = mp.receive(retry_timeout)
# Add the results from the last sending of pings to the overall results
results.update(single_results)
if not no_results:
# No addresses left? We are done.
break
retry_count += 1
return results, no_results | [
"def",
"multi_ping",
"(",
"dest_addrs",
",",
"timeout",
",",
"retry",
"=",
"0",
",",
"ignore_lookup_errors",
"=",
"False",
")",
":",
"retry",
"=",
"int",
"(",
"retry",
")",
"if",
"retry",
"<",
"0",
":",
"retry",
"=",
"0",
"timeout",
"=",
"float",
"("... | Combine send and receive measurement into single function.
This offers a retry mechanism: Overall timeout time is divided by
number of retries. Additional ICMPecho packets are sent to those
addresses from which we have not received answers, yet.
The retry mechanism is useful, because individual ICMP packets may get
lost.
If 'retry' is set to 0 then only a single packet is sent to each
address.
If 'ignore_lookup_errors' is set then any issues with resolving target
names or looking up their address information will silently be ignored.
Those targets simply appear in the 'no_results' return list. | [
"Combine",
"send",
"and",
"receive",
"measurement",
"into",
"single",
"function",
"."
] | 59f024c867a17fae5b4a7b52f97effc6fb1b0ca5 | https://github.com/romana/multi-ping/blob/59f024c867a17fae5b4a7b52f97effc6fb1b0ca5/multiping/__init__.py#L460-L506 | train | 22,877 |
romana/multi-ping | multiping/__init__.py | MultiPing._checksum | def _checksum(self, msg):
"""
Calculate the checksum of a packet.
This is inspired by a response on StackOverflow here:
https://stackoverflow.com/a/1769267/7242672
Thank you to StackOverflow user Jason Orendorff.
"""
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
s = 0
for i in range(0, len(msg), 2):
w = (msg[i] << 8) + msg[i + 1]
s = carry_around_add(s, w)
s = ~s & 0xffff
return s | python | def _checksum(self, msg):
"""
Calculate the checksum of a packet.
This is inspired by a response on StackOverflow here:
https://stackoverflow.com/a/1769267/7242672
Thank you to StackOverflow user Jason Orendorff.
"""
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
s = 0
for i in range(0, len(msg), 2):
w = (msg[i] << 8) + msg[i + 1]
s = carry_around_add(s, w)
s = ~s & 0xffff
return s | [
"def",
"_checksum",
"(",
"self",
",",
"msg",
")",
":",
"def",
"carry_around_add",
"(",
"a",
",",
"b",
")",
":",
"c",
"=",
"a",
"+",
"b",
"return",
"(",
"c",
"&",
"0xffff",
")",
"+",
"(",
"c",
">>",
"16",
")",
"s",
"=",
"0",
"for",
"i",
"in"... | Calculate the checksum of a packet.
This is inspired by a response on StackOverflow here:
https://stackoverflow.com/a/1769267/7242672
Thank you to StackOverflow user Jason Orendorff. | [
"Calculate",
"the",
"checksum",
"of",
"a",
"packet",
"."
] | 59f024c867a17fae5b4a7b52f97effc6fb1b0ca5 | https://github.com/romana/multi-ping/blob/59f024c867a17fae5b4a7b52f97effc6fb1b0ca5/multiping/__init__.py#L187-L207 | train | 22,878 |
romana/multi-ping | multiping/__init__.py | MultiPing.send | def send(self):
"""
Send pings to multiple addresses, ensuring unique IDs for each request.
This operation is non-blocking. Use 'receive' to get the results.
Send can be called multiple times. If there are any addresses left from
the previous send, from which results have not been received yet, then
it will resend pings to those remaining addresses.
"""
# Collect all the addresses for which we have not seen responses yet.
if not self._receive_has_been_called:
all_addrs = self._dest_addrs
else:
all_addrs = [a for (i, a) in list(self._id_to_addr.items())
if i in self._remaining_ids]
if self._last_used_id is None:
# Will attempt to continue at the last request ID we used. But if
# we never sent anything before then we create a first ID
# 'randomly' from the current time. ID is only a 16 bit field, so
# need to trim it down.
self._last_used_id = int(time.time()) & 0xffff
# Send ICMPecho to all addresses...
for addr in all_addrs:
# Make a unique ID, wrapping around at 65535.
self._last_used_id = (self._last_used_id + 1) & 0xffff
# Remember the address for each ID so we can produce meaningful
# result lists later on.
self._id_to_addr[self._last_used_id] = addr
# Send an ICMPecho request packet. We specify a payload consisting
# of the current time stamp. This is returned to us in the
# response and allows us to calculate the 'ping time'.
self._send_ping(addr, payload=struct.pack("d", time.time())) | python | def send(self):
"""
Send pings to multiple addresses, ensuring unique IDs for each request.
This operation is non-blocking. Use 'receive' to get the results.
Send can be called multiple times. If there are any addresses left from
the previous send, from which results have not been received yet, then
it will resend pings to those remaining addresses.
"""
# Collect all the addresses for which we have not seen responses yet.
if not self._receive_has_been_called:
all_addrs = self._dest_addrs
else:
all_addrs = [a for (i, a) in list(self._id_to_addr.items())
if i in self._remaining_ids]
if self._last_used_id is None:
# Will attempt to continue at the last request ID we used. But if
# we never sent anything before then we create a first ID
# 'randomly' from the current time. ID is only a 16 bit field, so
# need to trim it down.
self._last_used_id = int(time.time()) & 0xffff
# Send ICMPecho to all addresses...
for addr in all_addrs:
# Make a unique ID, wrapping around at 65535.
self._last_used_id = (self._last_used_id + 1) & 0xffff
# Remember the address for each ID so we can produce meaningful
# result lists later on.
self._id_to_addr[self._last_used_id] = addr
# Send an ICMPecho request packet. We specify a payload consisting
# of the current time stamp. This is returned to us in the
# response and allows us to calculate the 'ping time'.
self._send_ping(addr, payload=struct.pack("d", time.time())) | [
"def",
"send",
"(",
"self",
")",
":",
"# Collect all the addresses for which we have not seen responses yet.",
"if",
"not",
"self",
".",
"_receive_has_been_called",
":",
"all_addrs",
"=",
"self",
".",
"_dest_addrs",
"else",
":",
"all_addrs",
"=",
"[",
"a",
"for",
"(... | Send pings to multiple addresses, ensuring unique IDs for each request.
This operation is non-blocking. Use 'receive' to get the results.
Send can be called multiple times. If there are any addresses left from
the previous send, from which results have not been received yet, then
it will resend pings to those remaining addresses. | [
"Send",
"pings",
"to",
"multiple",
"addresses",
"ensuring",
"unique",
"IDs",
"for",
"each",
"request",
"."
] | 59f024c867a17fae5b4a7b52f97effc6fb1b0ca5 | https://github.com/romana/multi-ping/blob/59f024c867a17fae5b4a7b52f97effc6fb1b0ca5/multiping/__init__.py#L268-L303 | train | 22,879 |
romana/multi-ping | multiping/__init__.py | MultiPing._read_all_from_socket | def _read_all_from_socket(self, timeout):
"""
Read all packets we currently can on the socket.
Returns list of tuples. Each tuple contains a packet and the time at
which it was received. NOTE: The receive time is the time when our
recv() call returned, which greatly depends on when it was called. The
time is NOT the time at which the packet arrived at our host, but it's
the closest we can come to the real ping time.
If nothing was received within the timeout time, the return list is
empty.
First read is blocking with timeout, so we'll wait at least that long.
Then, in case any more packets have arrived, we read everything we can
from the socket in non-blocking mode.
"""
pkts = []
try:
self._sock.settimeout(timeout)
while True:
p = self._sock.recv(64)
# Store the packet and the current time
pkts.append((bytearray(p), time.time()))
# Continue the loop to receive any additional packets that
# may have arrived at this point. Changing the socket to
# non-blocking (by setting the timeout to 0), so that we'll
# only continue the loop until all current packets have been
# read.
self._sock.settimeout(0)
except socket.timeout:
# In the first blocking read with timout, we may not receive
# anything. This is not an error, it just means no data was
# available in the specified time.
pass
except socket.error as e:
# When we read in non-blocking mode, we may get this error with
# errno 11 to indicate that no more data is available. That's ok,
# just like the timeout.
if e.errno == errno.EWOULDBLOCK:
pass
else:
# We're not expecting any other socket exceptions, so we
# re-raise in that case.
raise
if self._ipv6_address_present:
try:
self._sock6.settimeout(timeout)
while True:
p = self._sock6.recv(128)
pkts.append((bytearray(p), time.time()))
self._sock6.settimeout(0)
except socket.timeout:
pass
except socket.error as e:
if e.errno == errno.EWOULDBLOCK:
pass
else:
raise
return pkts | python | def _read_all_from_socket(self, timeout):
"""
Read all packets we currently can on the socket.
Returns list of tuples. Each tuple contains a packet and the time at
which it was received. NOTE: The receive time is the time when our
recv() call returned, which greatly depends on when it was called. The
time is NOT the time at which the packet arrived at our host, but it's
the closest we can come to the real ping time.
If nothing was received within the timeout time, the return list is
empty.
First read is blocking with timeout, so we'll wait at least that long.
Then, in case any more packets have arrived, we read everything we can
from the socket in non-blocking mode.
"""
pkts = []
try:
self._sock.settimeout(timeout)
while True:
p = self._sock.recv(64)
# Store the packet and the current time
pkts.append((bytearray(p), time.time()))
# Continue the loop to receive any additional packets that
# may have arrived at this point. Changing the socket to
# non-blocking (by setting the timeout to 0), so that we'll
# only continue the loop until all current packets have been
# read.
self._sock.settimeout(0)
except socket.timeout:
# In the first blocking read with timout, we may not receive
# anything. This is not an error, it just means no data was
# available in the specified time.
pass
except socket.error as e:
# When we read in non-blocking mode, we may get this error with
# errno 11 to indicate that no more data is available. That's ok,
# just like the timeout.
if e.errno == errno.EWOULDBLOCK:
pass
else:
# We're not expecting any other socket exceptions, so we
# re-raise in that case.
raise
if self._ipv6_address_present:
try:
self._sock6.settimeout(timeout)
while True:
p = self._sock6.recv(128)
pkts.append((bytearray(p), time.time()))
self._sock6.settimeout(0)
except socket.timeout:
pass
except socket.error as e:
if e.errno == errno.EWOULDBLOCK:
pass
else:
raise
return pkts | [
"def",
"_read_all_from_socket",
"(",
"self",
",",
"timeout",
")",
":",
"pkts",
"=",
"[",
"]",
"try",
":",
"self",
".",
"_sock",
".",
"settimeout",
"(",
"timeout",
")",
"while",
"True",
":",
"p",
"=",
"self",
".",
"_sock",
".",
"recv",
"(",
"64",
")... | Read all packets we currently can on the socket.
Returns list of tuples. Each tuple contains a packet and the time at
which it was received. NOTE: The receive time is the time when our
recv() call returned, which greatly depends on when it was called. The
time is NOT the time at which the packet arrived at our host, but it's
the closest we can come to the real ping time.
If nothing was received within the timeout time, the return list is
empty.
First read is blocking with timeout, so we'll wait at least that long.
Then, in case any more packets have arrived, we read everything we can
from the socket in non-blocking mode. | [
"Read",
"all",
"packets",
"we",
"currently",
"can",
"on",
"the",
"socket",
"."
] | 59f024c867a17fae5b4a7b52f97effc6fb1b0ca5 | https://github.com/romana/multi-ping/blob/59f024c867a17fae5b4a7b52f97effc6fb1b0ca5/multiping/__init__.py#L305-L367 | train | 22,880 |
numat/midas | midas/driver.py | GasDetector.get | async def get(self):
"""Get current state from the Midas gas detector."""
try:
return self._parse(await self.read_registers(0, 16))
except TimeoutError:
return {'ip': self.ip, 'connected': False} | python | async def get(self):
"""Get current state from the Midas gas detector."""
try:
return self._parse(await self.read_registers(0, 16))
except TimeoutError:
return {'ip': self.ip, 'connected': False} | [
"async",
"def",
"get",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_parse",
"(",
"await",
"self",
".",
"read_registers",
"(",
"0",
",",
"16",
")",
")",
"except",
"TimeoutError",
":",
"return",
"{",
"'ip'",
":",
"self",
".",
"ip",
","... | Get current state from the Midas gas detector. | [
"Get",
"current",
"state",
"from",
"the",
"Midas",
"gas",
"detector",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/driver.py#L65-L70 | train | 22,881 |
numat/midas | midas/driver.py | GasDetector._parse | def _parse(self, registers):
"""Parse the response, returning a dictionary."""
result = {'ip': self.ip, 'connected': True}
decoder = BinaryPayloadDecoder.fromRegisters(registers,
byteorder=Endian.Big,
wordorder=Endian.Little)
# Register 40001 is a collection of alarm status signals
b = [decoder.decode_bits(), decoder.decode_bits()]
reg_40001 = b[1] + b[0]
# Bits 0-3 map to the monitor state
monitor_integer = sum(1 << i for i, b in enumerate(reg_40001[:4]) if b)
result['state'] = options['monitor state'][monitor_integer]
# Bits 4-5 map to fault status
fault_integer = sum(1 << i for i, b in enumerate(reg_40001[4:6]) if b)
result['fault'] = {'status': options['fault status'][fault_integer]}
# Bits 6 and 7 tell if low and high alarms are active
low, high = reg_40001[6:8]
result['alarm'] = options['alarm level'][low + high]
# Bits 8-10 tell if internal sensor relays 1-3 are energized. Skipping.
# Bit 11 is a heartbeat bit that toggles every two seconds. Skipping.
# Bit 12 tells if relays are under modbus control. Skipping.
# Remaining bits are empty. Skipping.
# Register 40002 has a gas ID and a sensor cartridge ID. Skipping.
decoder._pointer += 2
# Registers 40003-40004 are the gas concentration as a float
result['concentration'] = decoder.decode_32bit_float()
# Register 40005 is the concentration as an int. Skipping.
decoder._pointer += 2
# Register 40006 is the number of the most important fault.
fault_number = decoder.decode_16bit_uint()
if fault_number != 0:
code = ('m' if fault_number < 30 else 'F') + str(fault_number)
result['fault']['code'] = code
result['fault'].update(faults[code])
# Register 40007 holds the concentration unit in the second byte
# Instead of being an int, it's the position of the up bit
unit_bit = decoder.decode_bits().index(True)
result['units'] = options['concentration unit'][unit_bit]
decoder._pointer += 1
# Register 40008 holds the sensor temperature in Celsius
result['temperature'] = decoder.decode_16bit_int()
# Register 40009 holds number of hours remaining in cell life
result['life'] = decoder.decode_16bit_uint() / 24.0
# Register 40010 holds the number of heartbeats (16 LSB). Skipping.
decoder._pointer += 2
# Register 40011 is the sample flow rate in cc / min
result['flow'] = decoder.decode_16bit_uint()
# Register 40012 is blank. Skipping.
decoder._pointer += 2
# Registers 40013-40016 are the alarm concentration thresholds
result['low-alarm threshold'] = round(decoder.decode_32bit_float(), 6)
result['high-alarm threshold'] = round(decoder.decode_32bit_float(), 6)
# Despite what the manual says, thresholds are always reported in ppm.
# Let's fix that to match the concentration units.
if result['units'] == 'ppb':
result['concentration'] *= 1000
result['low-alarm threshold'] *= 1000
result['high-alarm threshold'] *= 1000
return result | python | def _parse(self, registers):
"""Parse the response, returning a dictionary."""
result = {'ip': self.ip, 'connected': True}
decoder = BinaryPayloadDecoder.fromRegisters(registers,
byteorder=Endian.Big,
wordorder=Endian.Little)
# Register 40001 is a collection of alarm status signals
b = [decoder.decode_bits(), decoder.decode_bits()]
reg_40001 = b[1] + b[0]
# Bits 0-3 map to the monitor state
monitor_integer = sum(1 << i for i, b in enumerate(reg_40001[:4]) if b)
result['state'] = options['monitor state'][monitor_integer]
# Bits 4-5 map to fault status
fault_integer = sum(1 << i for i, b in enumerate(reg_40001[4:6]) if b)
result['fault'] = {'status': options['fault status'][fault_integer]}
# Bits 6 and 7 tell if low and high alarms are active
low, high = reg_40001[6:8]
result['alarm'] = options['alarm level'][low + high]
# Bits 8-10 tell if internal sensor relays 1-3 are energized. Skipping.
# Bit 11 is a heartbeat bit that toggles every two seconds. Skipping.
# Bit 12 tells if relays are under modbus control. Skipping.
# Remaining bits are empty. Skipping.
# Register 40002 has a gas ID and a sensor cartridge ID. Skipping.
decoder._pointer += 2
# Registers 40003-40004 are the gas concentration as a float
result['concentration'] = decoder.decode_32bit_float()
# Register 40005 is the concentration as an int. Skipping.
decoder._pointer += 2
# Register 40006 is the number of the most important fault.
fault_number = decoder.decode_16bit_uint()
if fault_number != 0:
code = ('m' if fault_number < 30 else 'F') + str(fault_number)
result['fault']['code'] = code
result['fault'].update(faults[code])
# Register 40007 holds the concentration unit in the second byte
# Instead of being an int, it's the position of the up bit
unit_bit = decoder.decode_bits().index(True)
result['units'] = options['concentration unit'][unit_bit]
decoder._pointer += 1
# Register 40008 holds the sensor temperature in Celsius
result['temperature'] = decoder.decode_16bit_int()
# Register 40009 holds number of hours remaining in cell life
result['life'] = decoder.decode_16bit_uint() / 24.0
# Register 40010 holds the number of heartbeats (16 LSB). Skipping.
decoder._pointer += 2
# Register 40011 is the sample flow rate in cc / min
result['flow'] = decoder.decode_16bit_uint()
# Register 40012 is blank. Skipping.
decoder._pointer += 2
# Registers 40013-40016 are the alarm concentration thresholds
result['low-alarm threshold'] = round(decoder.decode_32bit_float(), 6)
result['high-alarm threshold'] = round(decoder.decode_32bit_float(), 6)
# Despite what the manual says, thresholds are always reported in ppm.
# Let's fix that to match the concentration units.
if result['units'] == 'ppb':
result['concentration'] *= 1000
result['low-alarm threshold'] *= 1000
result['high-alarm threshold'] *= 1000
return result | [
"def",
"_parse",
"(",
"self",
",",
"registers",
")",
":",
"result",
"=",
"{",
"'ip'",
":",
"self",
".",
"ip",
",",
"'connected'",
":",
"True",
"}",
"decoder",
"=",
"BinaryPayloadDecoder",
".",
"fromRegisters",
"(",
"registers",
",",
"byteorder",
"=",
"En... | Parse the response, returning a dictionary. | [
"Parse",
"the",
"response",
"returning",
"a",
"dictionary",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/driver.py#L72-L130 | train | 22,882 |
numat/midas | midas/util.py | AsyncioModbusClient._connect | async def _connect(self):
"""Start asynchronous reconnect loop."""
self.waiting = True
await self.client.start(self.ip)
self.waiting = False
if self.client.protocol is None:
raise IOError("Could not connect to '{}'.".format(self.ip))
self.open = True | python | async def _connect(self):
"""Start asynchronous reconnect loop."""
self.waiting = True
await self.client.start(self.ip)
self.waiting = False
if self.client.protocol is None:
raise IOError("Could not connect to '{}'.".format(self.ip))
self.open = True | [
"async",
"def",
"_connect",
"(",
"self",
")",
":",
"self",
".",
"waiting",
"=",
"True",
"await",
"self",
".",
"client",
".",
"start",
"(",
"self",
".",
"ip",
")",
"self",
".",
"waiting",
"=",
"False",
"if",
"self",
".",
"client",
".",
"protocol",
"... | Start asynchronous reconnect loop. | [
"Start",
"asynchronous",
"reconnect",
"loop",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L36-L43 | train | 22,883 |
numat/midas | midas/util.py | AsyncioModbusClient.read_registers | async def read_registers(self, address, count):
"""Read modbus registers.
The Modbus protocol doesn't allow responses longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests.
"""
registers = []
while count > 124:
r = await self._request('read_holding_registers', address, 124)
registers += r.registers
address, count = address + 124, count - 124
r = await self._request('read_holding_registers', address, count)
registers += r.registers
return registers | python | async def read_registers(self, address, count):
"""Read modbus registers.
The Modbus protocol doesn't allow responses longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests.
"""
registers = []
while count > 124:
r = await self._request('read_holding_registers', address, 124)
registers += r.registers
address, count = address + 124, count - 124
r = await self._request('read_holding_registers', address, count)
registers += r.registers
return registers | [
"async",
"def",
"read_registers",
"(",
"self",
",",
"address",
",",
"count",
")",
":",
"registers",
"=",
"[",
"]",
"while",
"count",
">",
"124",
":",
"r",
"=",
"await",
"self",
".",
"_request",
"(",
"'read_holding_registers'",
",",
"address",
",",
"124",... | Read modbus registers.
The Modbus protocol doesn't allow responses longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests. | [
"Read",
"modbus",
"registers",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L49-L63 | train | 22,884 |
numat/midas | midas/util.py | AsyncioModbusClient.write_register | async def write_register(self, address, value, skip_encode=False):
"""Write a modbus register."""
await self._request('write_registers', address, value, skip_encode=skip_encode) | python | async def write_register(self, address, value, skip_encode=False):
"""Write a modbus register."""
await self._request('write_registers', address, value, skip_encode=skip_encode) | [
"async",
"def",
"write_register",
"(",
"self",
",",
"address",
",",
"value",
",",
"skip_encode",
"=",
"False",
")",
":",
"await",
"self",
".",
"_request",
"(",
"'write_registers'",
",",
"address",
",",
"value",
",",
"skip_encode",
"=",
"skip_encode",
")"
] | Write a modbus register. | [
"Write",
"a",
"modbus",
"register",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L73-L75 | train | 22,885 |
numat/midas | midas/util.py | AsyncioModbusClient.write_registers | async def write_registers(self, address, values, skip_encode=False):
"""Write modbus registers.
The Modbus protocol doesn't allow requests longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests.
"""
while len(values) > 62:
await self._request('write_registers',
address, values, skip_encode=skip_encode)
address, values = address + 124, values[62:]
await self._request('write_registers',
address, values, skip_encode=skip_encode) | python | async def write_registers(self, address, values, skip_encode=False):
"""Write modbus registers.
The Modbus protocol doesn't allow requests longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests.
"""
while len(values) > 62:
await self._request('write_registers',
address, values, skip_encode=skip_encode)
address, values = address + 124, values[62:]
await self._request('write_registers',
address, values, skip_encode=skip_encode) | [
"async",
"def",
"write_registers",
"(",
"self",
",",
"address",
",",
"values",
",",
"skip_encode",
"=",
"False",
")",
":",
"while",
"len",
"(",
"values",
")",
">",
"62",
":",
"await",
"self",
".",
"_request",
"(",
"'write_registers'",
",",
"address",
","... | Write modbus registers.
The Modbus protocol doesn't allow requests longer than 250 bytes
(ie. 125 registers, 62 DF addresses), which this function manages by
chunking larger requests. | [
"Write",
"modbus",
"registers",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L77-L89 | train | 22,886 |
numat/midas | midas/util.py | AsyncioModbusClient._request | async def _request(self, method, *args, **kwargs):
"""Send a request to the device and awaits a response.
This mainly ensures that requests are sent serially, as the Modbus
protocol does not allow simultaneous requests (it'll ignore any
request sent while it's processing something). The driver handles this
by assuming there is only one client instance. If other clients
exist, other logic will have to be added to either prevent or manage
race conditions.
"""
if not self.open:
await self._connect()
while self.waiting:
await asyncio.sleep(0.1)
if self.client.protocol is None or not self.client.protocol.connected:
raise TimeoutError("Not connected to device.")
try:
future = getattr(self.client.protocol, method)(*args, **kwargs)
except AttributeError:
raise TimeoutError("Not connected to device.")
self.waiting = True
try:
return await asyncio.wait_for(future, timeout=self.timeout)
except asyncio.TimeoutError as e:
if self.open:
# This came from reading through the pymodbus@python3 source
# Problem was that the driver was not detecting disconnect
if hasattr(self, 'modbus'):
self.client.protocol_lost_connection(self.modbus)
self.open = False
raise TimeoutError(e)
except pymodbus.exceptions.ConnectionException as e:
raise ConnectionError(e)
finally:
self.waiting = False | python | async def _request(self, method, *args, **kwargs):
"""Send a request to the device and awaits a response.
This mainly ensures that requests are sent serially, as the Modbus
protocol does not allow simultaneous requests (it'll ignore any
request sent while it's processing something). The driver handles this
by assuming there is only one client instance. If other clients
exist, other logic will have to be added to either prevent or manage
race conditions.
"""
if not self.open:
await self._connect()
while self.waiting:
await asyncio.sleep(0.1)
if self.client.protocol is None or not self.client.protocol.connected:
raise TimeoutError("Not connected to device.")
try:
future = getattr(self.client.protocol, method)(*args, **kwargs)
except AttributeError:
raise TimeoutError("Not connected to device.")
self.waiting = True
try:
return await asyncio.wait_for(future, timeout=self.timeout)
except asyncio.TimeoutError as e:
if self.open:
# This came from reading through the pymodbus@python3 source
# Problem was that the driver was not detecting disconnect
if hasattr(self, 'modbus'):
self.client.protocol_lost_connection(self.modbus)
self.open = False
raise TimeoutError(e)
except pymodbus.exceptions.ConnectionException as e:
raise ConnectionError(e)
finally:
self.waiting = False | [
"async",
"def",
"_request",
"(",
"self",
",",
"method",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"open",
":",
"await",
"self",
".",
"_connect",
"(",
")",
"while",
"self",
".",
"waiting",
":",
"await",
"asyncio"... | Send a request to the device and awaits a response.
This mainly ensures that requests are sent serially, as the Modbus
protocol does not allow simultaneous requests (it'll ignore any
request sent while it's processing something). The driver handles this
by assuming there is only one client instance. If other clients
exist, other logic will have to be added to either prevent or manage
race conditions. | [
"Send",
"a",
"request",
"to",
"the",
"device",
"and",
"awaits",
"a",
"response",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L91-L125 | train | 22,887 |
numat/midas | midas/util.py | AsyncioModbusClient._close | def _close(self):
"""Close the TCP connection."""
self.client.stop()
self.open = False
self.waiting = False | python | def _close(self):
"""Close the TCP connection."""
self.client.stop()
self.open = False
self.waiting = False | [
"def",
"_close",
"(",
"self",
")",
":",
"self",
".",
"client",
".",
"stop",
"(",
")",
"self",
".",
"open",
"=",
"False",
"self",
".",
"waiting",
"=",
"False"
] | Close the TCP connection. | [
"Close",
"the",
"TCP",
"connection",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/util.py#L127-L131 | train | 22,888 |
numat/midas | midas/__init__.py | command_line | def command_line():
"""Command-line tool for Midas gas detector communication."""
import argparse
import asyncio
import json
parser = argparse.ArgumentParser(description="Read a Honeywell Midas gas "
"detector state from the command line.")
parser.add_argument('address', help="The IP address of the gas detector.")
args = parser.parse_args()
async def get():
async with GasDetector(args.address) as detector:
print(json.dumps(await detector.get(), indent=4, sort_keys=True))
loop = asyncio.get_event_loop()
loop.run_until_complete(get())
loop.close() | python | def command_line():
"""Command-line tool for Midas gas detector communication."""
import argparse
import asyncio
import json
parser = argparse.ArgumentParser(description="Read a Honeywell Midas gas "
"detector state from the command line.")
parser.add_argument('address', help="The IP address of the gas detector.")
args = parser.parse_args()
async def get():
async with GasDetector(args.address) as detector:
print(json.dumps(await detector.get(), indent=4, sort_keys=True))
loop = asyncio.get_event_loop()
loop.run_until_complete(get())
loop.close() | [
"def",
"command_line",
"(",
")",
":",
"import",
"argparse",
"import",
"asyncio",
"import",
"json",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Read a Honeywell Midas gas \"",
"\"detector state from the command line.\"",
")",
"parser",
"... | Command-line tool for Midas gas detector communication. | [
"Command",
"-",
"line",
"tool",
"for",
"Midas",
"gas",
"detector",
"communication",
"."
] | c3a97a6cd67df1283831c3c78bf3f984212e97a8 | https://github.com/numat/midas/blob/c3a97a6cd67df1283831c3c78bf3f984212e97a8/midas/__init__.py#L11-L28 | train | 22,889 |
bioinf-jku/FCD | build/lib/fcd/FCD.py | build_masked_loss | def build_masked_loss(loss_function, mask_value):
"""Builds a loss function that masks based on targets
Args:
loss_function: The loss function to mask
mask_value: The value to mask in the targets
Returns:
function: a loss function that acts like loss_function with masked inputs
"""
def masked_loss_function(y_true, y_pred):
mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
return loss_function(y_true * mask, y_pred * mask)
return masked_loss_function | python | def build_masked_loss(loss_function, mask_value):
"""Builds a loss function that masks based on targets
Args:
loss_function: The loss function to mask
mask_value: The value to mask in the targets
Returns:
function: a loss function that acts like loss_function with masked inputs
"""
def masked_loss_function(y_true, y_pred):
mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())
return loss_function(y_true * mask, y_pred * mask)
return masked_loss_function | [
"def",
"build_masked_loss",
"(",
"loss_function",
",",
"mask_value",
")",
":",
"def",
"masked_loss_function",
"(",
"y_true",
",",
"y_pred",
")",
":",
"mask",
"=",
"K",
".",
"cast",
"(",
"K",
".",
"not_equal",
"(",
"y_true",
",",
"mask_value",
")",
",",
"... | Builds a loss function that masks based on targets
Args:
loss_function: The loss function to mask
mask_value: The value to mask in the targets
Returns:
function: a loss function that acts like loss_function with masked inputs | [
"Builds",
"a",
"loss",
"function",
"that",
"masks",
"based",
"on",
"targets"
] | fe542b16d72a2d0899989374e1a86cc930d891e1 | https://github.com/bioinf-jku/FCD/blob/fe542b16d72a2d0899989374e1a86cc930d891e1/build/lib/fcd/FCD.py#L88-L103 | train | 22,890 |
google/python-gflags | gflags2man.py | ProgramInfo.Run | def Run(self):
"""Run it and collect output.
Returns:
1 (true) If everything went well.
0 (false) If there were problems.
"""
if not self.executable:
logging.error('Could not locate "%s"' % self.long_name)
return 0
finfo = os.stat(self.executable)
self.date = time.localtime(finfo[stat.ST_MTIME])
logging.info('Running: %s %s </dev/null 2>&1'
% (self.executable, FLAGS.help_flag))
# --help output is often routed to stderr, so we combine with stdout.
# Re-direct stdin to /dev/null to encourage programs that
# don't understand --help to exit.
(child_stdin, child_stdout_and_stderr) = os.popen4(
[self.executable, FLAGS.help_flag])
child_stdin.close() # '</dev/null'
self.output = child_stdout_and_stderr.readlines()
child_stdout_and_stderr.close()
if len(self.output) < _MIN_VALID_USAGE_MSG:
logging.error('Error: "%s %s" returned only %d lines: %s'
% (self.name, FLAGS.help_flag,
len(self.output), self.output))
return 0
return 1 | python | def Run(self):
"""Run it and collect output.
Returns:
1 (true) If everything went well.
0 (false) If there were problems.
"""
if not self.executable:
logging.error('Could not locate "%s"' % self.long_name)
return 0
finfo = os.stat(self.executable)
self.date = time.localtime(finfo[stat.ST_MTIME])
logging.info('Running: %s %s </dev/null 2>&1'
% (self.executable, FLAGS.help_flag))
# --help output is often routed to stderr, so we combine with stdout.
# Re-direct stdin to /dev/null to encourage programs that
# don't understand --help to exit.
(child_stdin, child_stdout_and_stderr) = os.popen4(
[self.executable, FLAGS.help_flag])
child_stdin.close() # '</dev/null'
self.output = child_stdout_and_stderr.readlines()
child_stdout_and_stderr.close()
if len(self.output) < _MIN_VALID_USAGE_MSG:
logging.error('Error: "%s %s" returned only %d lines: %s'
% (self.name, FLAGS.help_flag,
len(self.output), self.output))
return 0
return 1 | [
"def",
"Run",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"executable",
":",
"logging",
".",
"error",
"(",
"'Could not locate \"%s\"'",
"%",
"self",
".",
"long_name",
")",
"return",
"0",
"finfo",
"=",
"os",
".",
"stat",
"(",
"self",
".",
"executab... | Run it and collect output.
Returns:
1 (true) If everything went well.
0 (false) If there were problems. | [
"Run",
"it",
"and",
"collect",
"output",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L185-L214 | train | 22,891 |
google/python-gflags | gflags2man.py | ProgramInfo.Parse | def Parse(self):
"""Parse program output."""
(start_line, lang) = self.ParseDesc()
if start_line < 0:
return
if 'python' == lang:
self.ParsePythonFlags(start_line)
elif 'c' == lang:
self.ParseCFlags(start_line)
elif 'java' == lang:
self.ParseJavaFlags(start_line) | python | def Parse(self):
"""Parse program output."""
(start_line, lang) = self.ParseDesc()
if start_line < 0:
return
if 'python' == lang:
self.ParsePythonFlags(start_line)
elif 'c' == lang:
self.ParseCFlags(start_line)
elif 'java' == lang:
self.ParseJavaFlags(start_line) | [
"def",
"Parse",
"(",
"self",
")",
":",
"(",
"start_line",
",",
"lang",
")",
"=",
"self",
".",
"ParseDesc",
"(",
")",
"if",
"start_line",
"<",
"0",
":",
"return",
"if",
"'python'",
"==",
"lang",
":",
"self",
".",
"ParsePythonFlags",
"(",
"start_line",
... | Parse program output. | [
"Parse",
"program",
"output",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L216-L226 | train | 22,892 |
google/python-gflags | gflags2man.py | ProgramInfo.ParseDesc | def ParseDesc(self, start_line=0):
"""Parse the initial description.
This could be Python or C++.
Returns:
(start_line, lang_type)
start_line Line to start parsing flags on (int)
lang_type Either 'python' or 'c'
(-1, '') if the flags start could not be found
"""
exec_mod_start = self.executable + ':'
after_blank = 0
start_line = 0 # ignore the passed-in arg for now (?)
for start_line in range(start_line, len(self.output)): # collect top description
line = self.output[start_line].rstrip()
# Python flags start with 'flags:\n'
if ('flags:' == line
and len(self.output) > start_line+1
and '' == self.output[start_line+1].rstrip()):
start_line += 2
logging.debug('Flags start (python): %s' % line)
return (start_line, 'python')
# SWIG flags just have the module name followed by colon.
if exec_mod_start == line:
logging.debug('Flags start (swig): %s' % line)
return (start_line, 'python')
# C++ flags begin after a blank line and with a constant string
if after_blank and line.startswith(' Flags from '):
logging.debug('Flags start (c): %s' % line)
return (start_line, 'c')
# java flags begin with a constant string
if line == 'where flags are':
logging.debug('Flags start (java): %s' % line)
start_line += 2 # skip "Standard flags:"
return (start_line, 'java')
logging.debug('Desc: %s' % line)
self.desc.append(line)
after_blank = (line == '')
else:
logging.warn('Never found the start of the flags section for "%s"!'
% self.long_name)
return (-1, '') | python | def ParseDesc(self, start_line=0):
"""Parse the initial description.
This could be Python or C++.
Returns:
(start_line, lang_type)
start_line Line to start parsing flags on (int)
lang_type Either 'python' or 'c'
(-1, '') if the flags start could not be found
"""
exec_mod_start = self.executable + ':'
after_blank = 0
start_line = 0 # ignore the passed-in arg for now (?)
for start_line in range(start_line, len(self.output)): # collect top description
line = self.output[start_line].rstrip()
# Python flags start with 'flags:\n'
if ('flags:' == line
and len(self.output) > start_line+1
and '' == self.output[start_line+1].rstrip()):
start_line += 2
logging.debug('Flags start (python): %s' % line)
return (start_line, 'python')
# SWIG flags just have the module name followed by colon.
if exec_mod_start == line:
logging.debug('Flags start (swig): %s' % line)
return (start_line, 'python')
# C++ flags begin after a blank line and with a constant string
if after_blank and line.startswith(' Flags from '):
logging.debug('Flags start (c): %s' % line)
return (start_line, 'c')
# java flags begin with a constant string
if line == 'where flags are':
logging.debug('Flags start (java): %s' % line)
start_line += 2 # skip "Standard flags:"
return (start_line, 'java')
logging.debug('Desc: %s' % line)
self.desc.append(line)
after_blank = (line == '')
else:
logging.warn('Never found the start of the flags section for "%s"!'
% self.long_name)
return (-1, '') | [
"def",
"ParseDesc",
"(",
"self",
",",
"start_line",
"=",
"0",
")",
":",
"exec_mod_start",
"=",
"self",
".",
"executable",
"+",
"':'",
"after_blank",
"=",
"0",
"start_line",
"=",
"0",
"# ignore the passed-in arg for now (?)",
"for",
"start_line",
"in",
"range",
... | Parse the initial description.
This could be Python or C++.
Returns:
(start_line, lang_type)
start_line Line to start parsing flags on (int)
lang_type Either 'python' or 'c'
(-1, '') if the flags start could not be found | [
"Parse",
"the",
"initial",
"description",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L228-L272 | train | 22,893 |
google/python-gflags | gflags2man.py | ProgramInfo.ParseCFlags | def ParseCFlags(self, start_line=0):
"""Parse C style flags."""
modname = None # name of current module
modlist = []
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
if not line: # blank lines terminate flags
if flag: # save last flag
modlist.append(flag)
flag = None
continue
mobj = self.module_c_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag)
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue
mobj = self.flag_c_re.match(line)
if mobj: # start of a new flag
if flag: # save last flag
modlist.append(flag)
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue
# append to flag help. type and default are part of the main text
if flag:
flag.help += ' ' + line.strip()
else:
logging.info('Extra: %s' % line)
if flag:
modlist.append(flag) | python | def ParseCFlags(self, start_line=0):
"""Parse C style flags."""
modname = None # name of current module
modlist = []
flag = None
for line_num in range(start_line, len(self.output)): # collect flags
line = self.output[line_num].rstrip()
if not line: # blank lines terminate flags
if flag: # save last flag
modlist.append(flag)
flag = None
continue
mobj = self.module_c_re.match(line)
if mobj: # start of a new module
modname = mobj.group(1)
logging.debug('Module: %s' % line)
if flag:
modlist.append(flag)
self.module_list.append(modname)
self.modules.setdefault(modname, [])
modlist = self.modules[modname]
flag = None
continue
mobj = self.flag_c_re.match(line)
if mobj: # start of a new flag
if flag: # save last flag
modlist.append(flag)
logging.debug('Flag: %s' % line)
flag = Flag(mobj.group(1), mobj.group(2))
continue
# append to flag help. type and default are part of the main text
if flag:
flag.help += ' ' + line.strip()
else:
logging.info('Extra: %s' % line)
if flag:
modlist.append(flag) | [
"def",
"ParseCFlags",
"(",
"self",
",",
"start_line",
"=",
"0",
")",
":",
"modname",
"=",
"None",
"# name of current module",
"modlist",
"=",
"[",
"]",
"flag",
"=",
"None",
"for",
"line_num",
"in",
"range",
"(",
"start_line",
",",
"len",
"(",
"self",
"."... | Parse C style flags. | [
"Parse",
"C",
"style",
"flags",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L323-L362 | train | 22,894 |
google/python-gflags | gflags2man.py | ProgramInfo.Filter | def Filter(self):
"""Filter parsed data to create derived fields."""
if not self.desc:
self.short_desc = ''
return
for i in range(len(self.desc)): # replace full path with name
if self.desc[i].find(self.executable) >= 0:
self.desc[i] = self.desc[i].replace(self.executable, self.name)
self.short_desc = self.desc[0]
word_list = self.short_desc.split(' ')
all_names = [ self.name, self.short_name, ]
# Since the short_desc is always listed right after the name,
# trim it from the short_desc
while word_list and (word_list[0] in all_names
or word_list[0].lower() in all_names):
del word_list[0]
self.short_desc = '' # signal need to reconstruct
if not self.short_desc and word_list:
self.short_desc = ' '.join(word_list) | python | def Filter(self):
"""Filter parsed data to create derived fields."""
if not self.desc:
self.short_desc = ''
return
for i in range(len(self.desc)): # replace full path with name
if self.desc[i].find(self.executable) >= 0:
self.desc[i] = self.desc[i].replace(self.executable, self.name)
self.short_desc = self.desc[0]
word_list = self.short_desc.split(' ')
all_names = [ self.name, self.short_name, ]
# Since the short_desc is always listed right after the name,
# trim it from the short_desc
while word_list and (word_list[0] in all_names
or word_list[0].lower() in all_names):
del word_list[0]
self.short_desc = '' # signal need to reconstruct
if not self.short_desc and word_list:
self.short_desc = ' '.join(word_list) | [
"def",
"Filter",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"desc",
":",
"self",
".",
"short_desc",
"=",
"''",
"return",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"desc",
")",
")",
":",
"# replace full path with name",
"if",
"sel... | Filter parsed data to create derived fields. | [
"Filter",
"parsed",
"data",
"to",
"create",
"derived",
"fields",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L411-L431 | train | 22,895 |
google/python-gflags | gflags2man.py | GenerateDoc.Output | def Output(self):
"""Output all sections of the page."""
self.Open()
self.Header()
self.Body()
self.Footer() | python | def Output(self):
"""Output all sections of the page."""
self.Open()
self.Header()
self.Body()
self.Footer() | [
"def",
"Output",
"(",
"self",
")",
":",
"self",
".",
"Open",
"(",
")",
"self",
".",
"Header",
"(",
")",
"self",
".",
"Body",
"(",
")",
"self",
".",
"Footer",
"(",
")"
] | Output all sections of the page. | [
"Output",
"all",
"sections",
"of",
"the",
"page",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L446-L451 | train | 22,896 |
google/python-gflags | gflags/_helpers.py | GetFlagSuggestions | def GetFlagSuggestions(attempt, longopt_list):
"""Get helpful similar matches for an invalid flag."""
# Don't suggest on very short strings, or if no longopts are specified.
if len(attempt) <= 2 or not longopt_list:
return []
option_names = [v.split('=')[0] for v in longopt_list]
# Find close approximations in flag prefixes.
# This also handles the case where the flag is spelled right but ambiguous.
distances = [(_DamerauLevenshtein(attempt, option[0:len(attempt)]), option)
for option in option_names]
distances.sort(key=lambda t: t[0])
least_errors, _ = distances[0]
# Don't suggest excessively bad matches.
if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
return []
suggestions = []
for errors, name in distances:
if errors == least_errors:
suggestions.append(name)
else:
break
return suggestions | python | def GetFlagSuggestions(attempt, longopt_list):
"""Get helpful similar matches for an invalid flag."""
# Don't suggest on very short strings, or if no longopts are specified.
if len(attempt) <= 2 or not longopt_list:
return []
option_names = [v.split('=')[0] for v in longopt_list]
# Find close approximations in flag prefixes.
# This also handles the case where the flag is spelled right but ambiguous.
distances = [(_DamerauLevenshtein(attempt, option[0:len(attempt)]), option)
for option in option_names]
distances.sort(key=lambda t: t[0])
least_errors, _ = distances[0]
# Don't suggest excessively bad matches.
if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt):
return []
suggestions = []
for errors, name in distances:
if errors == least_errors:
suggestions.append(name)
else:
break
return suggestions | [
"def",
"GetFlagSuggestions",
"(",
"attempt",
",",
"longopt_list",
")",
":",
"# Don't suggest on very short strings, or if no longopts are specified.",
"if",
"len",
"(",
"attempt",
")",
"<=",
"2",
"or",
"not",
"longopt_list",
":",
"return",
"[",
"]",
"option_names",
"=... | Get helpful similar matches for an invalid flag. | [
"Get",
"helpful",
"similar",
"matches",
"for",
"an",
"invalid",
"flag",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/_helpers.py#L216-L241 | train | 22,897 |
google/python-gflags | gflags/_helpers.py | _DamerauLevenshtein | def _DamerauLevenshtein(a, b):
"""Damerau-Levenshtein edit distance from a to b."""
memo = {}
def Distance(x, y):
"""Recursively defined string distance with memoization."""
if (x, y) in memo:
return memo[x, y]
if not x:
d = len(y)
elif not y:
d = len(x)
else:
d = min(
Distance(x[1:], y) + 1, # correct an insertion error
Distance(x, y[1:]) + 1, # correct a deletion error
Distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character
if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
# Correct a transposition.
t = Distance(x[2:], y[2:]) + 1
if d > t:
d = t
memo[x, y] = d
return d
return Distance(a, b) | python | def _DamerauLevenshtein(a, b):
"""Damerau-Levenshtein edit distance from a to b."""
memo = {}
def Distance(x, y):
"""Recursively defined string distance with memoization."""
if (x, y) in memo:
return memo[x, y]
if not x:
d = len(y)
elif not y:
d = len(x)
else:
d = min(
Distance(x[1:], y) + 1, # correct an insertion error
Distance(x, y[1:]) + 1, # correct a deletion error
Distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character
if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]:
# Correct a transposition.
t = Distance(x[2:], y[2:]) + 1
if d > t:
d = t
memo[x, y] = d
return d
return Distance(a, b) | [
"def",
"_DamerauLevenshtein",
"(",
"a",
",",
"b",
")",
":",
"memo",
"=",
"{",
"}",
"def",
"Distance",
"(",
"x",
",",
"y",
")",
":",
"\"\"\"Recursively defined string distance with memoization.\"\"\"",
"if",
"(",
"x",
",",
"y",
")",
"in",
"memo",
":",
"retu... | Damerau-Levenshtein edit distance from a to b. | [
"Damerau",
"-",
"Levenshtein",
"edit",
"distance",
"from",
"a",
"to",
"b",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/_helpers.py#L244-L269 | train | 22,898 |
google/python-gflags | gflags/_helpers.py | FlagDictToArgs | def FlagDictToArgs(flag_map):
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to string an passed as such.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in six.iteritems(flag_map):
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value) | python | def FlagDictToArgs(flag_map):
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
    * Everything else is converted to string and passed as such.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in six.iteritems(flag_map):
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value) | [
"def",
"FlagDictToArgs",
"(",
"flag_map",
")",
":",
"for",
"key",
",",
"value",
"in",
"six",
".",
"iteritems",
"(",
"flag_map",
")",
":",
"if",
"value",
"is",
"None",
":",
"yield",
"'--%s'",
"%",
"key",
"elif",
"isinstance",
"(",
"value",
",",
"bool",
... | Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to string and passed as such.
Yields:
sequence of string suitable for a subprocess execution. | [
"Convert",
"a",
"dict",
"of",
"values",
"into",
"process",
"call",
"parameters",
"."
] | 4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6 | https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags/_helpers.py#L329-L364 | train | 22,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.