repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ottogroup/palladium | palladium/server.py | stream_cmd | python | def stream_cmd(argv=sys.argv[1:]): # pragma: no cover
docopt(stream_cmd.__doc__, argv=argv)
initialize_config()
stream = PredictStream()
stream.listen(sys.stdin, sys.stdout, sys.stderr) | \
Start the streaming server, which listens to stdin, processes line
by line, and returns predictions.
The input should consist of a list of json objects, where each object
will result in a prediction. Each line is processed in a batch.
Example input (must be on a single line):
[{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7,
"petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0,
"petal length": 1.4, "petal width": 5}]
Example output:
["Iris-virginica","Iris-setosa"]
An input line with the word 'exit' will quit the streaming server.
Usage:
pld-stream [options]
Options:
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L362-L391 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n",
"def listen(self, io_in, io_out, io_err):\n \"\"\"Listens to provided io stream and writes predictions\n to output. In case of errors, the error stream will be used.\n \"\"\"\n for line in io_in:\n if line.strip().lower() == 'exit':\n break\n\n try:\n y_pred = self.process_line(line)\n except Exception as e:\n io_out.write('[]\\n')\n io_err.write(\n \"Error while processing input row: {}\"\n \"{}: {}\\n\".format(line, type(e), e))\n io_err.flush()\n else:\n io_out.write(ujson.dumps(y_pred.tolist()))\n io_out.write('\\n')\n io_out.flush()\n"
] | """HTTP API implementation.
"""
import sys
from docopt import docopt
from flask import Flask
from flask import make_response
from flask import request
import numpy as np
import ujson
from werkzeug.exceptions import BadRequest
from . import __version__
from .fit import activate as activate_base
from .fit import fit as fit_base
from .interfaces import PredictError
from .util import args_from_config
from .util import get_config
from .util import get_metadata
from .util import initialize_config
from .util import logger
from .util import memory_usage_psutil
from .util import PluggableDecorator
from .util import process_store
from .util import run_job
from .util import resolve_dotted_name
app = Flask(__name__)
def make_ujson_response(obj, status_code=200):
"""Encodes the given *obj* to json and wraps it in a response.
:return:
A Flask response.
"""
json_encoded = ujson.encode(obj, ensure_ascii=False, double_precision=-1)
resp = make_response(json_encoded)
resp.mimetype = 'application/json'
resp.content_type = 'application/json; charset=utf-8'
resp.status_code = status_code
return resp
class PredictService:
"""A default :class:`palladium.interfaces.PredictService`
implementation.
Aims to work out of the box for the most standard use cases.
Allows overriding of specific parts of its logic by using granular
methods to compose the work.
"""
types = {
'float': float,
'int': int,
'str': str,
'bool': lambda x: x.lower() == 'true',
}
def __init__(
self,
mapping,
params=(),
entry_point='/predict',
decorator_list_name='predict_decorators',
predict_proba=False,
unwrap_sample=False,
**kwargs
):
"""
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
"""
self.mapping = mapping
self.params = params
self.entry_point = entry_point
self.decorator_list_name = decorator_list_name
self.predict_proba = predict_proba
self.unwrap_sample = unwrap_sample
vars(self).update(kwargs)
def initialize_component(self, config):
create_predict_function(
self.entry_point, self, self.decorator_list_name, config)
def __call__(self, model, request):
try:
return self.do(model, request)
except Exception as e:
return self.response_from_exception(e)
def do(self, model, request):
if request.method == 'GET':
single = True
samples = np.array([self.sample_from_data(model, request.args)])
else:
single = False
samples = []
for data in request.json:
samples.append(self.sample_from_data(model, data))
samples = np.array(samples)
params = self.params_from_data(model, request.args)
y_pred = self.predict(model, samples, **params)
return self.response_from_prediction(y_pred, single=single)
def sample_from_data(self, model, data):
"""Convert incoming sample *data* into a numpy array.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the sample's data, typically retrieved from
``request.args`` or similar.
"""
values = []
for key, type_name in self.mapping:
value_type = self.types[type_name]
values.append(value_type(data[key]))
if self.unwrap_sample:
assert len(values) == 1
return np.array(values[0])
else:
return np.array(values, dtype=object)
def params_from_data(self, model, data):
"""Retrieve additional parameters (keyword arguments) for
``model.predict`` from request *data*.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the parameter data, typically retrieved
from ``request.args`` or similar.
"""
params = {}
for key, type_name in self.params:
value_type = self.types[type_name]
if key in data:
params[key] = value_type(data[key])
elif hasattr(model, key):
params[key] = getattr(model, key)
return params
def predict(self, model, sample, **kwargs):
if self.predict_proba:
return model.predict_proba(sample, **kwargs)
else:
return model.predict(sample, **kwargs)
def response_from_prediction(self, y_pred, single=True):
"""Turns a model's prediction in *y_pred* into a JSON
response.
"""
result = y_pred.tolist()
if single:
result = result[0]
response = {
'metadata': get_metadata(),
'result': result,
}
return make_ujson_response(response, status_code=200)
def response_from_exception(self, exc):
if isinstance(exc, PredictError):
return make_ujson_response({
'metadata': get_metadata(
error_code=exc.error_code,
error_message=exc.error_message,
status="ERROR"
)
}, status_code=500)
elif isinstance(exc, BadRequest):
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="BadRequest: {}".format(exc.args),
status="ERROR"
)
}, status_code=400)
else:
logger.exception("Unexpected error")
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="{}: {}".format(
exc.__class__.__name__, str(exc)),
status="ERROR"
)
}, status_code=500)
def predict(model_persister, predict_service):
try:
model = model_persister.read()
response = predict_service(model, request)
except Exception as exc:
logger.exception("Unexpected error")
response = make_ujson_response({
"status": "ERROR",
"error_code": -1,
"error_message": "{}: {}".format(exc.__class__.__name__, str(exc)),
}, status_code=500)
return response
@app.route('/alive')
@PluggableDecorator('alive_decorators')
@args_from_config
def alive(alive=None):
if alive is None:
alive = {}
mem, mem_vms = memory_usage_psutil()
info = {
'memory_usage': mem, # rss, resident set size
'memory_usage_vms': mem_vms, # vms, virtual memory size
'palladium_version': __version__,
}
info['service_metadata'] = get_config().get('service_metadata', {})
status_code = 200
for attr in alive.get('process_store_required', ()):
obj = process_store.get(attr)
if obj is not None:
obj_info = {}
obj_info['updated'] = process_store.mtime[attr].isoformat()
if hasattr(obj, '__metadata__'):
obj_info['metadata'] = obj.__metadata__
info[attr] = obj_info
else:
info[attr] = "N/A"
status_code = 503
info['process_metadata'] = process_store['process_metadata']
return make_ujson_response(info, status_code=status_code)
def create_predict_function(
route, predict_service, decorator_list_name, config):
"""Creates a predict function and registers it to
the Flask app using the route decorator.
:param str route:
Path of the entry point.
:param palladium.interfaces.PredictService predict_service:
The predict service to be registered to this entry point.
:param str decorator_list_name:
The decorator list to be used for this predict service. It is
OK if there is no such entry in the active Palladium config.
:return:
A predict service function that will be used to process
predict requests.
"""
model_persister = config.get('model_persister')
@app.route(route, methods=['GET', 'POST'], endpoint=route)
@PluggableDecorator(decorator_list_name)
def predict_func():
return predict(model_persister, predict_service)
return predict_func
def devserver_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Serve the web API for development.
Usage:
pld-devserver [options]
Options:
-h --help Show this screen.
--host=<host> The host to use [default: 0.0.0.0].
--port=<port> The port to use [default: 5000].
--debug=<debug> Whether or not to use debug mode [default: 0].
"""
arguments = docopt(devserver_cmd.__doc__, argv=argv)
initialize_config()
app.run(
host=arguments['--host'],
port=int(arguments['--port']),
debug=int(arguments['--debug']),
)
class PredictStream:
"""A class that helps make predictions through stdin and stdout.
"""
def __init__(self):
self.model = get_config()['model_persister'].read()
self.predict_service = get_config()['predict_service']
def process_line(self, line):
predict_service = self.predict_service
datas = ujson.loads(line)
samples = [predict_service.sample_from_data(self.model, data)
for data in datas]
samples = np.array(samples)
params = predict_service.params_from_data(self.model, datas[0])
return predict_service.predict(self.model, samples, **params)
def listen(self, io_in, io_out, io_err):
"""Listens to provided io stream and writes predictions
to output. In case of errors, the error stream will be used.
"""
for line in io_in:
if line.strip().lower() == 'exit':
break
try:
y_pred = self.process_line(line)
except Exception as e:
io_out.write('[]\n')
io_err.write(
"Error while processing input row: {}"
"{}: {}\n".format(line, type(e), e))
io_err.flush()
else:
io_out.write(ujson.dumps(y_pred.tolist()))
io_out.write('\n')
io_out.flush()
@app.route('/list')
@PluggableDecorator('list_decorators')
@args_from_config
def list(model_persister):
info = {
'models': model_persister.list_models(),
'properties': model_persister.list_properties(),
}
return make_ujson_response(info)
@PluggableDecorator('server_fit_decorators')
@args_from_config
def fit():
param_converters = {
'persist': lambda x: x.lower() in ('1', 't', 'true'),
'activate': lambda x: x.lower() in ('1', 't', 'true'),
'evaluate': lambda x: x.lower() in ('1', 't', 'true'),
'persist_if_better_than': float,
}
params = {
name: typ(request.form[name])
for name, typ in param_converters.items()
if name in request.form
}
thread, job_id = run_job(fit_base, **params)
return make_ujson_response({'job_id': job_id}, status_code=200)
@PluggableDecorator('update_model_cache_decorators')
@args_from_config
def update_model_cache(model_persister):
method = getattr(model_persister, 'update_cache', None)
if method is not None:
thread, job_id = run_job(model_persister.update_cache)
return make_ujson_response({'job_id': job_id}, status_code=200)
else:
return make_ujson_response({}, status_code=503)
@PluggableDecorator('activate_decorators')
def activate():
model_version = int(request.form['model_version'])
try:
activate_base(model_version=model_version)
except LookupError:
return make_ujson_response({}, status_code=503)
else:
return list()
def add_url_rule(rule, endpoint=None, view_func=None, app=app, **options):
if isinstance(view_func, str):
view_func = resolve_dotted_name(view_func)
app.add_url_rule(rule, endpoint=endpoint, view_func=view_func, **options)
|
ottogroup/palladium | palladium/server.py | PredictService.sample_from_data | python | def sample_from_data(self, model, data):
values = []
for key, type_name in self.mapping:
value_type = self.types[type_name]
values.append(value_type(data[key]))
if self.unwrap_sample:
assert len(values) == 1
return np.array(values[0])
else:
return np.array(values, dtype=object) | Convert incoming sample *data* into a numpy array.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the sample's data, typically retrieved from
``request.args`` or similar. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L135-L152 | null | class PredictService:
"""A default :class:`palladium.interfaces.PredictService`
implementation.
Aims to work out of the box for the most standard use cases.
Allows overriding of specific parts of its logic by using granular
methods to compose the work.
"""
types = {
'float': float,
'int': int,
'str': str,
'bool': lambda x: x.lower() == 'true',
}
def __init__(
self,
mapping,
params=(),
entry_point='/predict',
decorator_list_name='predict_decorators',
predict_proba=False,
unwrap_sample=False,
**kwargs
):
"""
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
"""
self.mapping = mapping
self.params = params
self.entry_point = entry_point
self.decorator_list_name = decorator_list_name
self.predict_proba = predict_proba
self.unwrap_sample = unwrap_sample
vars(self).update(kwargs)
def initialize_component(self, config):
create_predict_function(
self.entry_point, self, self.decorator_list_name, config)
def __call__(self, model, request):
try:
return self.do(model, request)
except Exception as e:
return self.response_from_exception(e)
def do(self, model, request):
if request.method == 'GET':
single = True
samples = np.array([self.sample_from_data(model, request.args)])
else:
single = False
samples = []
for data in request.json:
samples.append(self.sample_from_data(model, data))
samples = np.array(samples)
params = self.params_from_data(model, request.args)
y_pred = self.predict(model, samples, **params)
return self.response_from_prediction(y_pred, single=single)
def params_from_data(self, model, data):
"""Retrieve additional parameters (keyword arguments) for
``model.predict`` from request *data*.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the parameter data, typically retrieved
from ``request.args`` or similar.
"""
params = {}
for key, type_name in self.params:
value_type = self.types[type_name]
if key in data:
params[key] = value_type(data[key])
elif hasattr(model, key):
params[key] = getattr(model, key)
return params
def predict(self, model, sample, **kwargs):
if self.predict_proba:
return model.predict_proba(sample, **kwargs)
else:
return model.predict(sample, **kwargs)
def response_from_prediction(self, y_pred, single=True):
"""Turns a model's prediction in *y_pred* into a JSON
response.
"""
result = y_pred.tolist()
if single:
result = result[0]
response = {
'metadata': get_metadata(),
'result': result,
}
return make_ujson_response(response, status_code=200)
def response_from_exception(self, exc):
if isinstance(exc, PredictError):
return make_ujson_response({
'metadata': get_metadata(
error_code=exc.error_code,
error_message=exc.error_message,
status="ERROR"
)
}, status_code=500)
elif isinstance(exc, BadRequest):
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="BadRequest: {}".format(exc.args),
status="ERROR"
)
}, status_code=400)
else:
logger.exception("Unexpected error")
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="{}: {}".format(
exc.__class__.__name__, str(exc)),
status="ERROR"
)
}, status_code=500)
|
ottogroup/palladium | palladium/server.py | PredictService.params_from_data | python | def params_from_data(self, model, data):
params = {}
for key, type_name in self.params:
value_type = self.types[type_name]
if key in data:
params[key] = value_type(data[key])
elif hasattr(model, key):
params[key] = getattr(model, key)
return params | Retrieve additional parameters (keyword arguments) for
``model.predict`` from request *data*.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the parameter data, typically retrieved
from ``request.args`` or similar. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L154-L171 | null | class PredictService:
"""A default :class:`palladium.interfaces.PredictService`
implementation.
Aims to work out of the box for the most standard use cases.
Allows overriding of specific parts of its logic by using granular
methods to compose the work.
"""
types = {
'float': float,
'int': int,
'str': str,
'bool': lambda x: x.lower() == 'true',
}
def __init__(
self,
mapping,
params=(),
entry_point='/predict',
decorator_list_name='predict_decorators',
predict_proba=False,
unwrap_sample=False,
**kwargs
):
"""
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
"""
self.mapping = mapping
self.params = params
self.entry_point = entry_point
self.decorator_list_name = decorator_list_name
self.predict_proba = predict_proba
self.unwrap_sample = unwrap_sample
vars(self).update(kwargs)
def initialize_component(self, config):
create_predict_function(
self.entry_point, self, self.decorator_list_name, config)
def __call__(self, model, request):
try:
return self.do(model, request)
except Exception as e:
return self.response_from_exception(e)
def do(self, model, request):
if request.method == 'GET':
single = True
samples = np.array([self.sample_from_data(model, request.args)])
else:
single = False
samples = []
for data in request.json:
samples.append(self.sample_from_data(model, data))
samples = np.array(samples)
params = self.params_from_data(model, request.args)
y_pred = self.predict(model, samples, **params)
return self.response_from_prediction(y_pred, single=single)
def sample_from_data(self, model, data):
"""Convert incoming sample *data* into a numpy array.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the sample's data, typically retrieved from
``request.args`` or similar.
"""
values = []
for key, type_name in self.mapping:
value_type = self.types[type_name]
values.append(value_type(data[key]))
if self.unwrap_sample:
assert len(values) == 1
return np.array(values[0])
else:
return np.array(values, dtype=object)
def predict(self, model, sample, **kwargs):
if self.predict_proba:
return model.predict_proba(sample, **kwargs)
else:
return model.predict(sample, **kwargs)
def response_from_prediction(self, y_pred, single=True):
"""Turns a model's prediction in *y_pred* into a JSON
response.
"""
result = y_pred.tolist()
if single:
result = result[0]
response = {
'metadata': get_metadata(),
'result': result,
}
return make_ujson_response(response, status_code=200)
def response_from_exception(self, exc):
if isinstance(exc, PredictError):
return make_ujson_response({
'metadata': get_metadata(
error_code=exc.error_code,
error_message=exc.error_message,
status="ERROR"
)
}, status_code=500)
elif isinstance(exc, BadRequest):
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="BadRequest: {}".format(exc.args),
status="ERROR"
)
}, status_code=400)
else:
logger.exception("Unexpected error")
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="{}: {}".format(
exc.__class__.__name__, str(exc)),
status="ERROR"
)
}, status_code=500)
|
ottogroup/palladium | palladium/server.py | PredictService.response_from_prediction | python | def response_from_prediction(self, y_pred, single=True):
result = y_pred.tolist()
if single:
result = result[0]
response = {
'metadata': get_metadata(),
'result': result,
}
return make_ujson_response(response, status_code=200) | Turns a model's prediction in *y_pred* into a JSON
response. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L179-L190 | [
"def make_ujson_response(obj, status_code=200):\n \"\"\"Encodes the given *obj* to json and wraps it in a response.\n\n :return:\n A Flask response.\n \"\"\"\n json_encoded = ujson.encode(obj, ensure_ascii=False, double_precision=-1)\n resp = make_response(json_encoded)\n resp.mimetype = 'application/json'\n resp.content_type = 'application/json; charset=utf-8'\n resp.status_code = status_code\n return resp\n"
] | class PredictService:
"""A default :class:`palladium.interfaces.PredictService`
implementation.
Aims to work out of the box for the most standard use cases.
Allows overriding of specific parts of its logic by using granular
methods to compose the work.
"""
types = {
'float': float,
'int': int,
'str': str,
'bool': lambda x: x.lower() == 'true',
}
def __init__(
self,
mapping,
params=(),
entry_point='/predict',
decorator_list_name='predict_decorators',
predict_proba=False,
unwrap_sample=False,
**kwargs
):
"""
:param mapping:
A list of query parameters and their type that should be
included in the request. These will be processed in the
:meth:`sample_from_data` method to construct a sample
that can be used for prediction. An example that expects
two request parameters called ``pos`` and ``neg`` that are
both of type str::
{ ...
'mapping': [('pos', 'str'), ('neg', 'str')]
... }
:param params:
Similarly to *mapping*, this is a list of name and type of
parameters that will be passed to the model's
:meth:`~palladium.interfaces.Model.predict` method as keyword
arguments.
:param predict_proba:
Instead of returning a single class (the default), when
*predict_proba* is set to true, the result will instead
contain a list of class probabilities.
:param unwrap_sample:
When working with text, scikit-learn and others will
sometimes expect the input to be a 1d array of strings
rather than a 2d array. Setting *unwrap_sample* to true
will use this representation.
"""
self.mapping = mapping
self.params = params
self.entry_point = entry_point
self.decorator_list_name = decorator_list_name
self.predict_proba = predict_proba
self.unwrap_sample = unwrap_sample
vars(self).update(kwargs)
def initialize_component(self, config):
create_predict_function(
self.entry_point, self, self.decorator_list_name, config)
def __call__(self, model, request):
try:
return self.do(model, request)
except Exception as e:
return self.response_from_exception(e)
def do(self, model, request):
if request.method == 'GET':
single = True
samples = np.array([self.sample_from_data(model, request.args)])
else:
single = False
samples = []
for data in request.json:
samples.append(self.sample_from_data(model, data))
samples = np.array(samples)
params = self.params_from_data(model, request.args)
y_pred = self.predict(model, samples, **params)
return self.response_from_prediction(y_pred, single=single)
def sample_from_data(self, model, data):
"""Convert incoming sample *data* into a numpy array.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the sample's data, typically retrieved from
``request.args`` or similar.
"""
values = []
for key, type_name in self.mapping:
value_type = self.types[type_name]
values.append(value_type(data[key]))
if self.unwrap_sample:
assert len(values) == 1
return np.array(values[0])
else:
return np.array(values, dtype=object)
def params_from_data(self, model, data):
"""Retrieve additional parameters (keyword arguments) for
``model.predict`` from request *data*.
:param model:
The :class:`~Model` instance to use for making predictions.
:param data:
A dict-like with the parameter data, typically retrieved
from ``request.args`` or similar.
"""
params = {}
for key, type_name in self.params:
value_type = self.types[type_name]
if key in data:
params[key] = value_type(data[key])
elif hasattr(model, key):
params[key] = getattr(model, key)
return params
def predict(self, model, sample, **kwargs):
if self.predict_proba:
return model.predict_proba(sample, **kwargs)
else:
return model.predict(sample, **kwargs)
def response_from_exception(self, exc):
if isinstance(exc, PredictError):
return make_ujson_response({
'metadata': get_metadata(
error_code=exc.error_code,
error_message=exc.error_message,
status="ERROR"
)
}, status_code=500)
elif isinstance(exc, BadRequest):
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="BadRequest: {}".format(exc.args),
status="ERROR"
)
}, status_code=400)
else:
logger.exception("Unexpected error")
return make_ujson_response({
'metadata': get_metadata(
error_code=-1,
error_message="{}: {}".format(
exc.__class__.__name__, str(exc)),
status="ERROR"
)
}, status_code=500)
|
ottogroup/palladium | palladium/server.py | PredictStream.listen | python | def listen(self, io_in, io_out, io_err):
for line in io_in:
if line.strip().lower() == 'exit':
break
try:
y_pred = self.process_line(line)
except Exception as e:
io_out.write('[]\n')
io_err.write(
"Error while processing input row: {}"
"{}: {}\n".format(line, type(e), e))
io_err.flush()
else:
io_out.write(ujson.dumps(y_pred.tolist()))
io_out.write('\n')
io_out.flush() | Listens to provided io stream and writes predictions
to output. In case of errors, the error stream will be used. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/server.py#L340-L359 | [
"def process_line(self, line):\n predict_service = self.predict_service\n datas = ujson.loads(line)\n samples = [predict_service.sample_from_data(self.model, data)\n for data in datas]\n samples = np.array(samples)\n params = predict_service.params_from_data(self.model, datas[0])\n return predict_service.predict(self.model, samples, **params)\n"
] | class PredictStream:
"""A class that helps make predictions through stdin and stdout.
"""
def __init__(self):
self.model = get_config()['model_persister'].read()
self.predict_service = get_config()['predict_service']
def process_line(self, line):
predict_service = self.predict_service
datas = ujson.loads(line)
samples = [predict_service.sample_from_data(self.model, data)
for data in datas]
samples = np.array(samples)
params = predict_service.params_from_data(self.model, datas[0])
return predict_service.predict(self.model, samples, **params)
|
ottogroup/palladium | palladium/eval.py | list_cmd | python | def list_cmd(argv=sys.argv[1:]): # pragma: no cover
docopt(list_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
list() | \
List information about available models.
Uses the 'model_persister' from the configuration to display a list of
models and their metadata.
Usage:
pld-list [options]
Options:
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/eval.py#L82-L97 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """Utilities for testing the performance of a trained model.
"""
from pprint import pprint
import sys
from docopt import docopt
from sklearn.metrics import get_scorer
from .util import args_from_config
from .util import initialize_config
from .util import logger
from .util import timer
@args_from_config
def test(dataset_loader_test, model_persister,
scoring=None, model_version=None):
with timer(logger.info, "Loading data"):
X, y = dataset_loader_test()
with timer(logger.info, "Reading model"):
model = model_persister.read(version=model_version)
logger.info(
'Loaded model version {}'.format(model.__metadata__['version']))
if not (hasattr(model, 'score') or scoring is not None):
raise ValueError(
"Your model doesn't seem to implement a 'score' method. You may "
"want to define a 'scoring' option in the configuration."
)
with timer(logger.info, "Applying model"):
scores = []
if scoring is not None:
if not isinstance(scoring, dict):
scoring = {'score': scoring}
for key, scorer in scoring.items():
scorer = get_scorer(scorer)
scores.append("{}: {}".format(key, scorer(model, X, y)))
else:
scores.append("score: {}".format(model.score(X, y)))
logger.info("Score: {}.".format('\n '.join(scores)))
return scores
def test_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Test a model.
Uses 'dataset_loader_test' and 'model_persister' from the
configuration to load a test dataset to test the accuracy of a trained
model with.
Usage:
pld-test [options]
Options:
-h --help Show this screen.
--model-version=<version> The version of the model to be tested. If
not specified, the newest model will be used.
"""
arguments = docopt(test_cmd.__doc__, argv=argv)
model_version = arguments['--model-version']
model_version = int(model_version) if model_version is not None else None
initialize_config(__mode__='fit')
test(model_version=model_version)
@args_from_config
def list(model_persister):
    """Print all persisted model versions and the persister's properties.

    NOTE(review): the name shadows the builtin ``list``; it is kept because
    the configuration/CLI wiring refers to this function by name.
    """
    print("Models:")
    pprint(model_persister.list_models())
    print("Properties:")
    pprint(model_persister.list_properties())
|
ottogroup/palladium | palladium/fit.py | fit_cmd | python | def fit_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(fit_cmd.__doc__, argv=argv)
no_save = arguments['--no-save']
no_activate = arguments['--no-activate']
save_if_better_than = arguments['--save-if-better-than']
evaluate = arguments['--evaluate'] or bool(save_if_better_than)
if save_if_better_than is not None:
save_if_better_than = float(save_if_better_than)
initialize_config(__mode__='fit')
fit(
persist=not no_save,
activate=not no_activate,
evaluate=evaluate,
persist_if_better_than=save_if_better_than,
) | \
Fit a model and save to database.
Will use 'dataset_loader_train', 'model', and 'model_persister' from
the configuration file, to load a dataset to train a model with, and
persist it.
Usage:
pld-fit [options]
Options:
-n --no-save Don't persist the fitted model to disk.
--no-activate Don't activate the fitted model.
--save-if-better-than=<k> Persist only if test score better than given
value.
-e --evaluate Evaluate fitted model on train and test set and
print out results.
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L96-L133 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """Utilities for fitting models.
"""
from warnings import warn
import sys
from datetime import datetime
from docopt import docopt
import pandas
from sklearn.externals.joblib import parallel_backend
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV
from .interfaces import annotate
from .util import apply_kwargs
from .util import args_from_config
from .util import initialize_config
from .util import logger
from .util import PluggableDecorator
from .util import timer
def _persist_model(model, model_persister, activate=True):
    """Annotate *model* with training metadata, store it, and (optionally)
    make the stored version the active one.

    :param model: fitted estimator; gets a 'train_timestamp' annotation
        attached, plus 'cv_results' for search estimators
    :param model_persister: backend with ``write``/``activate`` methods
    :param activate: when True, activate the freshly written version
    """
    meta = {'train_timestamp': datetime.now().isoformat()}
    # Grid/random search estimators carry their full CV table; keep it
    # alongside the model as a JSON list of records.
    cv_results = getattr(model, 'cv_results_', None)
    if cv_results is not None:
        meta['cv_results'] = pandas.DataFrame(cv_results).to_json(
            orient='records')
    annotate(model, meta)
    with timer(logger.info, "Writing model"):
        version = model_persister.write(model)
        logger.info("Wrote model with version {}.".format(version))
    if activate:
        model_persister.activate(version)
@PluggableDecorator('fit_decorators')
@args_from_config
def fit(dataset_loader_train, model, model_persister, persist=True,
        activate=True, dataset_loader_test=None, evaluate=False,
        persist_if_better_than=None, scoring=None):
    """Train *model* on the training dataset and optionally persist it.

    Arguments are normally injected from the Palladium configuration by
    ``args_from_config``; ``fit_decorators`` from the config may wrap this
    function as well.

    :param dataset_loader_train: callable returning the ``(X, y)`` train data
    :param model: estimator with a scikit-learn style ``fit`` method
    :param model_persister: storage backend with ``write``/``activate``
    :param persist: when True, write the fitted model to the persister
    :param activate: when True, activate the newly written model version
    :param dataset_loader_test: optional callable returning test data
    :param evaluate: when True, score the model on the train (and test) set
    :param persist_if_better_than: float; persist only if the test score
        reaches this value (implies ``evaluate``)
    :param scoring: optional scorer spec accepted by
        ``sklearn.metrics.get_scorer``; falls back to ``model.score``
    :return: the fitted model
    :raises ValueError: if ``persist_if_better_than`` is used without a
        test dataset loader, or no way of scoring the model exists
    """
    if persist_if_better_than is not None:
        # A score threshold only makes sense if a test score can be computed.
        evaluate = True
        if dataset_loader_test is None:
            raise ValueError(
                "When using 'persist_if_better_than', make sure you also "
                "provide a 'dataset_loader_test'."
                )
    if evaluate and not (hasattr(model, 'score') or scoring is not None):
        raise ValueError(
            "Your model doesn't seem to implement a 'score' method. You may "
            "want to define a 'scoring' option in the configuration."
            )
    if scoring is not None:
        scorer = get_scorer(scoring)
    else:
        # Fall back to the estimator's own score method.
        def scorer(model, X, y):
            return model.score(X, y)
    with timer(logger.info, "Loading data"):
        X, y = dataset_loader_train()
    with timer(logger.info, "Fitting model"):
        model.fit(X, y)
    if evaluate:
        with timer(logger.debug, "Evaluating model on train set"):
            score_train = scorer(model, X, y)
        annotate(model, {'score_train': score_train})
        logger.info("Train score: {}".format(score_train))
    score_test = None
    if evaluate and dataset_loader_test is not None:
        X_test, y_test = dataset_loader_test()
        with timer(logger.debug, "Evaluating model on test set"):
            score_test = scorer(model, X_test, y_test)
        annotate(model, {'score_test': score_test})
        logger.info("Test score: {}".format(score_test))
    if persist:
        if (persist_if_better_than is not None and
                score_test < persist_if_better_than):
            logger.info("Not persisting model that has a test score "
                        "{} < {}".format(score_test, persist_if_better_than))
        else:
            _persist_model(model, model_persister, activate=activate)
    return model
@args_from_config
def activate(model_persister, model_version):
    """Make the model with *model_version* the active one.

    :param model_persister: storage backend with an ``activate`` method
    :param model_version: int version identifier of the model to activate
    """
    model_persister.activate(model_version)
    logger.info("Activated model with version {}.".format(model_version))
@args_from_config
def delete(model_persister, model_version):
    """Remove the model with *model_version* from the persister.

    :param model_persister: storage backend with a ``delete`` method
    :param model_version: int version identifier of the model to delete
    """
    model_persister.delete(model_version)
    logger.info("Deleted model with version {}.".format(model_version))
def admin_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Activate or delete models.

    Models are usually made active right after fitting (see command
    pld-fit). The 'activate' command allows you to explicitly set the
    currently active model. Use 'pld-list' to get an overview of all
    available models along with their version identifiers.

    Deleting a model will simply remove it from the database.

    Usage:
      pld-admin activate <version> [options]
      pld-admin delete <version> [options]

    Options:
      -h --help                 Show this screen.
    """
    # NOTE: the docstring above is the docopt grammar of this command.
    arguments = docopt(admin_cmd.__doc__, argv=argv)
    initialize_config(__mode__='fit')
    if arguments['activate']:
        activate(model_version=int(arguments['<version>']))
    elif arguments['delete']:
        delete(model_version=int(arguments['<version>']))
def with_parallel_backend(
        estimator,
        backend,
        methods=('fit', 'predict', 'predict_proba'),
        **backend_params
):
    """Patch *estimator* so the given methods run inside a joblib backend.

    Each named method is replaced (on the instance) by a wrapper that
    enters ``parallel_backend(backend, **backend_params)`` before
    delegating to the original method.

    :param estimator: object whose methods are wrapped in place
    :param backend: joblib backend name, e.g. 'threading' or 'dask'
    :param methods: iterable of attribute names to wrap
    :param backend_params: extra keyword arguments for ``parallel_backend``
    :return: the same *estimator* instance, for convenience
    """
    # Local import keeps the module namespace unchanged for callers.
    from functools import wraps

    def make_wrapper(func):
        # functools.wraps preserves __name__/__doc__ of the wrapped
        # method, so introspection and debugging keep working.
        @wraps(func)
        def wrapped(*args, **kwargs):
            with parallel_backend(backend, **backend_params):
                return func(*args, **kwargs)
        return wrapped

    for name in methods:
        setattr(estimator, name, make_wrapper(getattr(estimator, name)))
    return estimator
@args_from_config
def grid_search(dataset_loader_train, model, grid_search, scoring=None,
                save_results=None, persist_best=False, model_persister=None):
    """Run a (hyper-parameter) grid search over *model* and print results.

    Arguments are injected from the Palladium configuration by
    ``args_from_config``.

    :param dataset_loader_train: callable returning the ``(X, y)`` train data
    :param model: estimator passed to ``GridSearchCV``
    :param grid_search: either a dict of ``GridSearchCV`` keyword arguments
        or a ready-made search estimator with a ``fit`` method
    :param scoring: optional scorer spec; may alternatively live inside the
        ``grid_search`` dict (deprecated)
    :param save_results: optional CSV filename for the cv results table
    :param persist_best: when True, refit and persist the best model
    :param model_persister: required when ``persist_best`` is used
    :return: the fitted search estimator
    :raises ValueError: on inconsistent scoring/persister configuration
    """
    if persist_best and model_persister is None:
        raise ValueError(
            "Cannot persist the best model without a model_persister. Please "
            "specify one in your Palladium configuration file."
            )
    with timer(logger.info, "Loading data"):
        X, y = dataset_loader_train()
    if isinstance(grid_search, dict):
        # 'refit' must be on for persist_best, since only a refitted search
        # exposes a usable best model.
        search_kwargs = {
            'refit': persist_best,
            }
        search_kwargs.update(grid_search)
        cv = search_kwargs.get('cv', None)
        if callable(cv):
            # Allow cv factories that want to know the dataset size/content.
            search_kwargs['cv'] = apply_kwargs(cv, n=len(y), X=X, y=y)
        if 'scoring' in search_kwargs:
            warn("Use of 'scoring' inside of 'grid_search' is deprecated. "
                 "To fix, move 'scoring' up to the top level of the configuration "
                 "dict.", DeprecationWarning)
            if scoring is not None:
                raise ValueError("You cannot define 'scoring' in 'grid_search' "
                                 "and globally.")
            scoring = search_kwargs['scoring']
        elif scoring is not None:
            search_kwargs['scoring'] = scoring
        if not (hasattr(model, 'score') or scoring is not None):
            raise ValueError(
                "Your model doesn't seem to implement a 'score' method. You may "
                "want to define a 'scoring' option in the configuration."
                )
        search = GridSearchCV(model, **search_kwargs)
    else:
        # A pre-built search estimator is used as-is.
        search = grid_search
    with timer(logger.info, "Running grid search"):
        search.fit(X, y)
    results = pandas.DataFrame(search.cv_results_)
    # Widen pandas' display limits so the whole table is printed.
    pandas.options.display.max_rows = len(results)
    pandas.options.display.max_columns = len(results.columns)
    if 'rank_test_score' in results:
        results = results.sort_values('rank_test_score')
    print(results)
    if save_results:
        results.to_csv(save_results, index=False)
    if persist_best:
        _persist_model(search, model_persister, activate=True)
    return search
def grid_search_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Grid search parameters for the model.

    Uses 'dataset_loader_train', 'model', and 'grid_search' from the
    configuration to load a training dataset, and run a grid search on the
    model using the grid of hyperparameters.

    Usage:
      pld-grid-search [options]

    Options:
      --save-results=<fname>  Save results to CSV file

      --persist-best          Persist the best model from grid search

      -h --help               Show this screen.
    """
    # NOTE: the docstring above is the docopt grammar of this command.
    arguments = docopt(grid_search_cmd.__doc__, argv=argv)
    initialize_config(__mode__='fit')
    grid_search(
        save_results=arguments['--save-results'],
        persist_best=arguments['--persist-best'],
        )
|
ottogroup/palladium | palladium/fit.py | admin_cmd | python | def admin_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(admin_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
if arguments['activate']:
activate(model_version=int(arguments['<version>']))
elif arguments['delete']:
delete(model_version=int(arguments['<version>'])) | \
Activate or delete models.
Models are usually made active right after fitting (see command
pld-fit). The 'activate' command allows you to explicitly set the
currently active model. Use 'pld-list' to get an overview of all
available models along with their version identifiers.
Deleting a model will simply remove it from the database.
Usage:
pld-admin activate <version> [options]
pld-admin delete <version> [options]
Options:
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L148-L171 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """Utilities for fitting models.
"""
from warnings import warn
import sys
from datetime import datetime
from docopt import docopt
import pandas
from sklearn.externals.joblib import parallel_backend
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV
from .interfaces import annotate
from .util import apply_kwargs
from .util import args_from_config
from .util import initialize_config
from .util import logger
from .util import PluggableDecorator
from .util import timer
def _persist_model(model, model_persister, activate=True):
metadata = {
'train_timestamp': datetime.now().isoformat(),
}
cv_results = getattr(model, 'cv_results_', None)
if cv_results is not None:
json_str = pandas.DataFrame(cv_results).to_json(orient='records')
metadata['cv_results'] = json_str
annotate(model, metadata)
with timer(logger.info, "Writing model"):
version = model_persister.write(model)
logger.info("Wrote model with version {}.".format(version))
if activate:
model_persister.activate(version)
@PluggableDecorator('fit_decorators')
@args_from_config
def fit(dataset_loader_train, model, model_persister, persist=True,
activate=True, dataset_loader_test=None, evaluate=False,
persist_if_better_than=None, scoring=None):
if persist_if_better_than is not None:
evaluate = True
if dataset_loader_test is None:
raise ValueError(
"When using 'persist_if_better_than', make sure you also "
"provide a 'dataset_loader_test'."
)
if evaluate and not (hasattr(model, 'score') or scoring is not None):
raise ValueError(
"Your model doesn't seem to implement a 'score' method. You may "
"want to define a 'scoring' option in the configuration."
)
if scoring is not None:
scorer = get_scorer(scoring)
else:
def scorer(model, X, y):
return model.score(X, y)
with timer(logger.info, "Loading data"):
X, y = dataset_loader_train()
with timer(logger.info, "Fitting model"):
model.fit(X, y)
if evaluate:
with timer(logger.debug, "Evaluating model on train set"):
score_train = scorer(model, X, y)
annotate(model, {'score_train': score_train})
logger.info("Train score: {}".format(score_train))
score_test = None
if evaluate and dataset_loader_test is not None:
X_test, y_test = dataset_loader_test()
with timer(logger.debug, "Evaluating model on test set"):
score_test = scorer(model, X_test, y_test)
annotate(model, {'score_test': score_test})
logger.info("Test score: {}".format(score_test))
if persist:
if (persist_if_better_than is not None and
score_test < persist_if_better_than):
logger.info("Not persisting model that has a test score "
"{} < {}".format(score_test, persist_if_better_than))
else:
_persist_model(model, model_persister, activate=activate)
return model
def fit_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
    Fit a model and save to database.

    Will use 'dataset_loader_train', 'model', and 'model_persister' from
    the configuration file, to load a dataset to train a model with, and
    persist it.

    Usage:
      pld-fit [options]

    Options:
      -n --no-save               Don't persist the fitted model to disk.

      --no-activate              Don't activate the fitted model.

      --save-if-better-than=<k>  Persist only if test score better than given
                                 value.

      -e --evaluate              Evaluate fitted model on train and test set and
                                 print out results.

      -h --help                  Show this screen.
    """
    # NOTE: the docstring above is parsed by docopt and therefore defines
    # the command-line interface of this entry point.
    arguments = docopt(fit_cmd.__doc__, argv=argv)
    no_save = arguments['--no-save']
    no_activate = arguments['--no-activate']
    save_if_better_than = arguments['--save-if-better-than']
    # --save-if-better-than implies evaluation: a test score is needed to
    # decide whether to persist.
    evaluate = arguments['--evaluate'] or bool(save_if_better_than)
    if save_if_better_than is not None:
        save_if_better_than = float(save_if_better_than)
    initialize_config(__mode__='fit')
    fit(
        persist=not no_save,
        activate=not no_activate,
        evaluate=evaluate,
        persist_if_better_than=save_if_better_than,
        )
@args_from_config
def activate(model_persister, model_version):
model_persister.activate(model_version)
logger.info("Activated model with version {}.".format(model_version))
@args_from_config
def delete(model_persister, model_version):
model_persister.delete(model_version)
logger.info("Deleted model with version {}.".format(model_version))
def with_parallel_backend(
estimator,
backend,
methods=('fit', 'predict', 'predict_proba'),
**backend_params
):
def wrapper(func):
def wrapped(*args, **kwargs):
with parallel_backend(backend, **backend_params):
return func(*args, **kwargs)
return wrapped
for name in methods:
setattr(estimator, name, wrapper(getattr(estimator, name)))
return estimator
@args_from_config
def grid_search(dataset_loader_train, model, grid_search, scoring=None,
save_results=None, persist_best=False, model_persister=None):
if persist_best and model_persister is None:
raise ValueError(
"Cannot persist the best model without a model_persister. Please "
"specify one in your Palladium configuration file."
)
with timer(logger.info, "Loading data"):
X, y = dataset_loader_train()
if isinstance(grid_search, dict):
search_kwargs = {
'refit': persist_best,
}
search_kwargs.update(grid_search)
cv = search_kwargs.get('cv', None)
if callable(cv):
search_kwargs['cv'] = apply_kwargs(cv, n=len(y), X=X, y=y)
if 'scoring' in search_kwargs:
warn("Use of 'scoring' inside of 'grid_search' is deprecated. "
"To fix, move 'scoring' up to the top level of the configuration "
"dict.", DeprecationWarning)
if scoring is not None:
raise ValueError("You cannot define 'scoring' in 'grid_search' "
"and globally.")
scoring = search_kwargs['scoring']
elif scoring is not None:
search_kwargs['scoring'] = scoring
if not (hasattr(model, 'score') or scoring is not None):
raise ValueError(
"Your model doesn't seem to implement a 'score' method. You may "
"want to define a 'scoring' option in the configuration."
)
search = GridSearchCV(model, **search_kwargs)
else:
search = grid_search
with timer(logger.info, "Running grid search"):
search.fit(X, y)
results = pandas.DataFrame(search.cv_results_)
pandas.options.display.max_rows = len(results)
pandas.options.display.max_columns = len(results.columns)
if 'rank_test_score' in results:
results = results.sort_values('rank_test_score')
print(results)
if save_results:
results.to_csv(save_results, index=False)
if persist_best:
_persist_model(search, model_persister, activate=True)
return search
def grid_search_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Grid search parameters for the model.
Uses 'dataset_loader_train', 'model', and 'grid_search' from the
configuration to load a training dataset, and run a grid search on the
model using the grid of hyperparameters.
Usage:
pld-grid-search [options]
Options:
--save-results=<fname> Save results to CSV file
--persist-best Persist the best model from grid search
-h --help Show this screen.
"""
arguments = docopt(grid_search_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
grid_search(
save_results=arguments['--save-results'],
persist_best=arguments['--persist-best'],
)
|
ottogroup/palladium | palladium/fit.py | grid_search_cmd | python | def grid_search_cmd(argv=sys.argv[1:]): # pragma: no cover
arguments = docopt(grid_search_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
grid_search(
save_results=arguments['--save-results'],
persist_best=arguments['--persist-best'],
) | \
Grid search parameters for the model.
Uses 'dataset_loader_train', 'model', and 'grid_search' from the
configuration to load a training dataset, and run a grid search on the
model using the grid of hyperparameters.
Usage:
pld-grid-search [options]
Options:
--save-results=<fname> Save results to CSV file
--persist-best Persist the best model from grid search
-h --help Show this screen. | train | https://github.com/ottogroup/palladium/blob/f3a4372fba809efbd8da7c979a8c6faff04684dd/palladium/fit.py#L250-L271 | [
"def initialize_config(**extra):\n if _config.initialized:\n raise RuntimeError(\"Configuration was already initialized\")\n return get_config(**extra)\n"
] | """Utilities for fitting models.
"""
from warnings import warn
import sys
from datetime import datetime
from docopt import docopt
import pandas
from sklearn.externals.joblib import parallel_backend
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV
from .interfaces import annotate
from .util import apply_kwargs
from .util import args_from_config
from .util import initialize_config
from .util import logger
from .util import PluggableDecorator
from .util import timer
def _persist_model(model, model_persister, activate=True):
metadata = {
'train_timestamp': datetime.now().isoformat(),
}
cv_results = getattr(model, 'cv_results_', None)
if cv_results is not None:
json_str = pandas.DataFrame(cv_results).to_json(orient='records')
metadata['cv_results'] = json_str
annotate(model, metadata)
with timer(logger.info, "Writing model"):
version = model_persister.write(model)
logger.info("Wrote model with version {}.".format(version))
if activate:
model_persister.activate(version)
@PluggableDecorator('fit_decorators')
@args_from_config
def fit(dataset_loader_train, model, model_persister, persist=True,
activate=True, dataset_loader_test=None, evaluate=False,
persist_if_better_than=None, scoring=None):
if persist_if_better_than is not None:
evaluate = True
if dataset_loader_test is None:
raise ValueError(
"When using 'persist_if_better_than', make sure you also "
"provide a 'dataset_loader_test'."
)
if evaluate and not (hasattr(model, 'score') or scoring is not None):
raise ValueError(
"Your model doesn't seem to implement a 'score' method. You may "
"want to define a 'scoring' option in the configuration."
)
if scoring is not None:
scorer = get_scorer(scoring)
else:
def scorer(model, X, y):
return model.score(X, y)
with timer(logger.info, "Loading data"):
X, y = dataset_loader_train()
with timer(logger.info, "Fitting model"):
model.fit(X, y)
if evaluate:
with timer(logger.debug, "Evaluating model on train set"):
score_train = scorer(model, X, y)
annotate(model, {'score_train': score_train})
logger.info("Train score: {}".format(score_train))
score_test = None
if evaluate and dataset_loader_test is not None:
X_test, y_test = dataset_loader_test()
with timer(logger.debug, "Evaluating model on test set"):
score_test = scorer(model, X_test, y_test)
annotate(model, {'score_test': score_test})
logger.info("Test score: {}".format(score_test))
if persist:
if (persist_if_better_than is not None and
score_test < persist_if_better_than):
logger.info("Not persisting model that has a test score "
"{} < {}".format(score_test, persist_if_better_than))
else:
_persist_model(model, model_persister, activate=activate)
return model
def fit_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Fit a model and save to database.
Will use 'dataset_loader_train', 'model', and 'model_persister' from
the configuration file, to load a dataset to train a model with, and
persist it.
Usage:
pld-fit [options]
Options:
-n --no-save Don't persist the fitted model to disk.
--no-activate Don't activate the fitted model.
--save-if-better-than=<k> Persist only if test score better than given
value.
-e --evaluate Evaluate fitted model on train and test set and
print out results.
-h --help Show this screen.
"""
arguments = docopt(fit_cmd.__doc__, argv=argv)
no_save = arguments['--no-save']
no_activate = arguments['--no-activate']
save_if_better_than = arguments['--save-if-better-than']
evaluate = arguments['--evaluate'] or bool(save_if_better_than)
if save_if_better_than is not None:
save_if_better_than = float(save_if_better_than)
initialize_config(__mode__='fit')
fit(
persist=not no_save,
activate=not no_activate,
evaluate=evaluate,
persist_if_better_than=save_if_better_than,
)
@args_from_config
def activate(model_persister, model_version):
model_persister.activate(model_version)
logger.info("Activated model with version {}.".format(model_version))
@args_from_config
def delete(model_persister, model_version):
model_persister.delete(model_version)
logger.info("Deleted model with version {}.".format(model_version))
def admin_cmd(argv=sys.argv[1:]): # pragma: no cover
"""\
Activate or delete models.
Models are usually made active right after fitting (see command
pld-fit). The 'activate' command allows you to explicitly set the
currently active model. Use 'pld-list' to get an overview of all
available models along with their version identifiers.
Deleting a model will simply remove it from the database.
Usage:
pld-admin activate <version> [options]
pld-admin delete <version> [options]
Options:
-h --help Show this screen.
"""
arguments = docopt(admin_cmd.__doc__, argv=argv)
initialize_config(__mode__='fit')
if arguments['activate']:
activate(model_version=int(arguments['<version>']))
elif arguments['delete']:
delete(model_version=int(arguments['<version>']))
def with_parallel_backend(
estimator,
backend,
methods=('fit', 'predict', 'predict_proba'),
**backend_params
):
def wrapper(func):
def wrapped(*args, **kwargs):
with parallel_backend(backend, **backend_params):
return func(*args, **kwargs)
return wrapped
for name in methods:
setattr(estimator, name, wrapper(getattr(estimator, name)))
return estimator
@args_from_config
def grid_search(dataset_loader_train, model, grid_search, scoring=None,
save_results=None, persist_best=False, model_persister=None):
if persist_best and model_persister is None:
raise ValueError(
"Cannot persist the best model without a model_persister. Please "
"specify one in your Palladium configuration file."
)
with timer(logger.info, "Loading data"):
X, y = dataset_loader_train()
if isinstance(grid_search, dict):
search_kwargs = {
'refit': persist_best,
}
search_kwargs.update(grid_search)
cv = search_kwargs.get('cv', None)
if callable(cv):
search_kwargs['cv'] = apply_kwargs(cv, n=len(y), X=X, y=y)
if 'scoring' in search_kwargs:
warn("Use of 'scoring' inside of 'grid_search' is deprecated. "
"To fix, move 'scoring' up to the top level of the configuration "
"dict.", DeprecationWarning)
if scoring is not None:
raise ValueError("You cannot define 'scoring' in 'grid_search' "
"and globally.")
scoring = search_kwargs['scoring']
elif scoring is not None:
search_kwargs['scoring'] = scoring
if not (hasattr(model, 'score') or scoring is not None):
raise ValueError(
"Your model doesn't seem to implement a 'score' method. You may "
"want to define a 'scoring' option in the configuration."
)
search = GridSearchCV(model, **search_kwargs)
else:
search = grid_search
with timer(logger.info, "Running grid search"):
search.fit(X, y)
results = pandas.DataFrame(search.cv_results_)
pandas.options.display.max_rows = len(results)
pandas.options.display.max_columns = len(results.columns)
if 'rank_test_score' in results:
results = results.sort_values('rank_test_score')
print(results)
if save_results:
results.to_csv(save_results, index=False)
if persist_best:
_persist_model(search, model_persister, activate=True)
return search
|
prawn-cake/vk-requests | vk_requests/streaming.py | Stream.consumer | python | def consumer(self, fn):
if self._consumer_fn is not None:
raise ValueError('Consumer function is already defined for this '
'Stream instance')
if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]):
raise ValueError('Consumer function must be a coroutine')
self._consumer_fn = fn | Consumer decorator
:param fn: coroutine consumer function
Example:
>>> api = StreamingAPI('my_service_key')
>>> stream = api.get_stream()
>>> @stream.consumer
>>> @asyncio.coroutine
>>> def handle_event(payload):
>>> print(payload) | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L26-L47 | null | class Stream(object):
"""Stream representation"""
    def __init__(self, conn_url):
        """:param conn_url: websocket URL to connect to (``wss://...``)"""
        self._conn_url = conn_url
        # Registered later via the ``consumer`` decorator; every received
        # message is passed to this coroutine.
        self._consumer_fn = None
    def __repr__(self):
        # e.g. "Stream(conn_url=wss://streaming.vk.com/stream?key=...)"
        return '%s(conn_url=%s)' % (self.__class__.__name__, self._conn_url)
    def consume(self, timeout=None, loop=None):
        """Start consuming the stream

        Blocks until the websocket connection ends, the optional timeout
        fires, or the process is interrupted.

        :param timeout: int: if it's given then it stops consumer after given
        number of seconds
        :param loop: optional asyncio event loop; when omitted a fresh loop
            is created and installed as the current one
        :raises ValueError: if no consumer function has been registered
        """
        if self._consumer_fn is None:
            raise ValueError('Consumer function is not defined yet')
        logger.info('Start consuming the stream')

        # Generator-based coroutine (pre async/await style): connects the
        # websocket and forwards every received message to the consumer.
        # NOTE(review): @asyncio.coroutine is deprecated in modern asyncio;
        # kept here for the library's supported Python versions.
        @asyncio.coroutine
        def worker(conn_url):
            extra_headers = {
                'Connection': 'upgrade',
                'Upgrade': 'websocket',
                'Sec-Websocket-Version': 13,
            }
            ws = yield from websockets.connect(
                conn_url, extra_headers=extra_headers)
            if ws is None:
                raise RuntimeError("Couldn't connect to the '%s'" % conn_url)
            try:
                while True:
                    message = yield from ws.recv()
                    yield from self._consumer_fn(message)
            finally:
                # Always close the websocket, also on errors/cancellation.
                yield from ws.close()

        if loop is None:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        try:
            task = worker(conn_url=self._conn_url)
            if timeout:
                logger.info('Running task with timeout %s sec', timeout)
                loop.run_until_complete(
                    asyncio.wait_for(task, timeout=timeout))
            else:
                loop.run_until_complete(task)
        except asyncio.TimeoutError:
            # NOTE(review): the loop is only closed on timeout/interrupt —
            # presumably a Stream is consumed once per process; confirm.
            logger.info('Timeout is reached. Closing the loop')
            loop.close()
        except KeyboardInterrupt:
            logger.info('Closing the loop')
            loop.close()
|
prawn-cake/vk-requests | vk_requests/streaming.py | Stream.consume | python | def consume(self, timeout=None, loop=None):
if self._consumer_fn is None:
raise ValueError('Consumer function is not defined yet')
logger.info('Start consuming the stream')
@asyncio.coroutine
def worker(conn_url):
extra_headers = {
'Connection': 'upgrade',
'Upgrade': 'websocket',
'Sec-Websocket-Version': 13,
}
ws = yield from websockets.connect(
conn_url, extra_headers=extra_headers)
if ws is None:
raise RuntimeError("Couldn't connect to the '%s'" % conn_url)
try:
while True:
message = yield from ws.recv()
yield from self._consumer_fn(message)
finally:
yield from ws.close()
if loop is None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
task = worker(conn_url=self._conn_url)
if timeout:
logger.info('Running task with timeout %s sec', timeout)
loop.run_until_complete(
asyncio.wait_for(task, timeout=timeout))
else:
loop.run_until_complete(task)
except asyncio.TimeoutError:
logger.info('Timeout is reached. Closing the loop')
loop.close()
except KeyboardInterrupt:
logger.info('Closing the loop')
loop.close() | Start consuming the stream
:param timeout: int: if it's given then it stops consumer after given
number of seconds | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L49-L98 | null | class Stream(object):
"""Stream representation"""
    def __init__(self, conn_url):
        """:param conn_url: websocket URL to connect to (``wss://...``)"""
        self._conn_url = conn_url
        # Set once via the ``consumer`` decorator below.
        self._consumer_fn = None
    def __repr__(self):
        # e.g. "Stream(conn_url=wss://streaming.vk.com/stream?key=...)"
        return '%s(conn_url=%s)' % (self.__class__.__name__, self._conn_url)
def consumer(self, fn):
"""Consumer decorator
:param fn: coroutine consumer function
Example:
>>> api = StreamingAPI('my_service_key')
>>> stream = api.get_stream()
>>> @stream.consumer
>>> @asyncio.coroutine
>>> def handle_event(payload):
>>> print(payload)
"""
if self._consumer_fn is not None:
raise ValueError('Consumer function is already defined for this '
'Stream instance')
if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]):
raise ValueError('Consumer function must be a coroutine')
self._consumer_fn = fn
|
prawn-cake/vk-requests | vk_requests/streaming.py | StreamingAPI.add_rule | python | def add_rule(self, value, tag):
resp = requests.post(url=self.REQUEST_URL.format(**self._params),
json={'rule': {'value': value, 'tag': tag}})
return resp.json() | Add a new rule
:param value: str
:param tag: str
:return: dict of a json response | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L117-L126 | null | class StreamingAPI(object):
"""VK Streaming API implementation
Docs: https://vk.com/dev/streaming_api_docs
"""
REQUEST_URL = 'https://{endpoint}/rules?key={key}'
STREAM_URL = 'wss://{endpoint}/stream?key={key}'
    def __init__(self, service_token):
        """Create the streaming client.

        :param service_token: str: VK service token (required)
        :raises ValueError: if the token is empty/falsy
        """
        if not service_token:
            raise ValueError('service_token is required')

        # Imported here, presumably to avoid a circular import with the
        # package root — confirm before moving to module level.
        import vk_requests
        self.api = vk_requests.create_api(service_token=service_token)
        # Parameters ('endpoint', 'key') used to fill REQUEST_URL/STREAM_URL.
        self._params = self.api.streaming.getServerUrl()
    def get_rules(self):
        """Fetch the currently registered streaming rules.

        :return: dict of the parsed json response
        """
        resp = requests.get(url=self.REQUEST_URL.format(**self._params))
        return resp.json()
def remove_rule(self, tag):
"""Remove a rule by tag
"""
resp = requests.delete(url=self.REQUEST_URL.format(**self._params),
json={'tag': tag})
return resp.json()
    def get_stream(self):
        """Factory method to get a stream object

        :return Stream instance
        """
        # The websocket URL embeds the server endpoint and access key.
        return Stream(conn_url=self.STREAM_URL.format(**self._params))
    def get_settings(self):
        """Get settings object with monthly limit info

        :return: response of the streaming.getSettings API call
        """
        return self.api.streaming.getSettings()
|
prawn-cake/vk-requests | vk_requests/streaming.py | StreamingAPI.remove_rule | python | def remove_rule(self, tag):
resp = requests.delete(url=self.REQUEST_URL.format(**self._params),
json={'tag': tag})
return resp.json() | Remove a rule by tag | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/streaming.py#L132-L138 | null | class StreamingAPI(object):
"""VK Streaming API implementation
Docs: https://vk.com/dev/streaming_api_docs
"""
REQUEST_URL = 'https://{endpoint}/rules?key={key}'
STREAM_URL = 'wss://{endpoint}/stream?key={key}'
    def __init__(self, service_token):
        """Create the streaming client.

        :param service_token: str: VK service token (required)
        :raises ValueError: if the token is empty/falsy
        """
        if not service_token:
            raise ValueError('service_token is required')

        # Imported here, presumably to avoid a circular import with the
        # package root — confirm before moving to module level.
        import vk_requests
        self.api = vk_requests.create_api(service_token=service_token)
        # Parameters ('endpoint', 'key') used to fill REQUEST_URL/STREAM_URL.
        self._params = self.api.streaming.getServerUrl()
def add_rule(self, value, tag):
"""Add a new rule
:param value: str
:param tag: str
:return: dict of a json response
"""
resp = requests.post(url=self.REQUEST_URL.format(**self._params),
json={'rule': {'value': value, 'tag': tag}})
return resp.json()
    def get_rules(self):
        """Fetch the currently registered streaming rules.

        :return: dict of the parsed json response
        """
        resp = requests.get(url=self.REQUEST_URL.format(**self._params))
        return resp.json()
    def get_stream(self):
        """Factory method to get a stream object

        :return Stream instance
        """
        # The websocket URL embeds the server endpoint and access key.
        return Stream(conn_url=self.STREAM_URL.format(**self._params))
    def get_settings(self):
        """Get settings object with monthly limit info

        :return: response of the streaming.getSettings API call
        """
        return self.api.streaming.getSettings()
|
prawn-cake/vk-requests | vk_requests/utils.py | stringify_values | python | def stringify_values(data):
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict | Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L22-L52 | null | # -*- coding: utf-8 -*-
import logging
from collections import Iterable
import bs4
import requests
import six
from vk_requests.exceptions import VkParseError, VkPageWarningsError
logger = logging.getLogger('vk-requests')
try:
# Python 2
from urllib import urlencode
from urlparse import urlparse, parse_qsl, urljoin
except ImportError:
# Python 3
from urllib.parse import urlparse, parse_qsl, urlencode, urljoin
def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query
def parse_form_action_url(html, parser=None):
"""Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Find more than 1 forms to handle:\n%s', forms)
form = forms[0]
return form.get('action')
def parse_captcha_html(html, response_url):
parser = bs4.BeautifulSoup(html, 'html.parser')
captcha_sid = parser.find('input', {"name": "captcha_sid"}).get("value")
captcha_img = parser.find('img', {"id": "captcha"}).get("src")
captcha_url = urljoin(response_url, captcha_img)
return captcha_sid, captcha_url
def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result)
def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True
class VerboseHTTPSession(requests.Session):
"""HTTP session based on requests.Session with some extra logging
"""
def __init__(self):
super(VerboseHTTPSession, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def request(self, method, url, **kwargs):
self.logger.debug(
'Request: %s %s, params=%r, data=%r',
method, url, kwargs.get('params'), kwargs.get('data'))
response = super(VerboseHTTPSession, self).request(
method, url, **kwargs)
self.logger.debug(
'Response: %s %s', response.status_code, response.url)
return response
|
prawn-cake/vk-requests | vk_requests/utils.py | parse_url_query_params | python | def parse_url_query_params(url, fragment=True):
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query | Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L55-L69 | null | # -*- coding: utf-8 -*-
import logging
from collections import Iterable
import bs4
import requests
import six
from vk_requests.exceptions import VkParseError, VkPageWarningsError
logger = logging.getLogger('vk-requests')
try:
# Python 2
from urllib import urlencode
from urlparse import urlparse, parse_qsl, urljoin
except ImportError:
# Python 3
from urllib.parse import urlparse, parse_qsl, urlencode, urljoin
def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict
def parse_form_action_url(html, parser=None):
"""Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Find more than 1 forms to handle:\n%s', forms)
form = forms[0]
return form.get('action')
def parse_captcha_html(html, response_url):
parser = bs4.BeautifulSoup(html, 'html.parser')
captcha_sid = parser.find('input', {"name": "captcha_sid"}).get("value")
captcha_img = parser.find('img', {"id": "captcha"}).get("src")
captcha_url = urljoin(response_url, captcha_img)
return captcha_sid, captcha_url
def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result)
def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True
class VerboseHTTPSession(requests.Session):
"""HTTP session based on requests.Session with some extra logging
"""
def __init__(self):
super(VerboseHTTPSession, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def request(self, method, url, **kwargs):
self.logger.debug(
'Request: %s %s, params=%r, data=%r',
method, url, kwargs.get('params'), kwargs.get('data'))
response = super(VerboseHTTPSession, self).request(
method, url, **kwargs)
self.logger.debug(
'Response: %s %s', response.status_code, response.url)
return response
|
prawn-cake/vk-requests | vk_requests/utils.py | parse_form_action_url | python | def parse_form_action_url(html, parser=None):
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Find more than 1 forms to handle:\n%s', forms)
form = forms[0]
return form.get('action') | Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346 | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L72-L88 | null | # -*- coding: utf-8 -*-
import logging
from collections import Iterable
import bs4
import requests
import six
from vk_requests.exceptions import VkParseError, VkPageWarningsError
logger = logging.getLogger('vk-requests')
try:
# Python 2
from urllib import urlencode
from urlparse import urlparse, parse_qsl, urljoin
except ImportError:
# Python 3
from urllib.parse import urlparse, parse_qsl, urlencode, urljoin
def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict
def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query
def parse_captcha_html(html, response_url):
parser = bs4.BeautifulSoup(html, 'html.parser')
captcha_sid = parser.find('input', {"name": "captcha_sid"}).get("value")
captcha_img = parser.find('img', {"id": "captcha"}).get("src")
captcha_url = urljoin(response_url, captcha_img)
return captcha_sid, captcha_url
def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result)
def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True
class VerboseHTTPSession(requests.Session):
"""HTTP session based on requests.Session with some extra logging
"""
def __init__(self):
super(VerboseHTTPSession, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def request(self, method, url, **kwargs):
self.logger.debug(
'Request: %s %s, params=%r, data=%r',
method, url, kwargs.get('params'), kwargs.get('data'))
response = super(VerboseHTTPSession, self).request(
method, url, **kwargs)
self.logger.debug(
'Response: %s %s', response.status_code, response.url)
return response
|
prawn-cake/vk-requests | vk_requests/utils.py | parse_masked_phone_number | python | def parse_masked_phone_number(html, parser=None):
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result) | Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L99-L119 | null | # -*- coding: utf-8 -*-
import logging
from collections import Iterable
import bs4
import requests
import six
from vk_requests.exceptions import VkParseError, VkPageWarningsError
logger = logging.getLogger('vk-requests')
try:
# Python 2
from urllib import urlencode
from urlparse import urlparse, parse_qsl, urljoin
except ImportError:
# Python 3
from urllib.parse import urlparse, parse_qsl, urlencode, urljoin
def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict
def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query
def parse_form_action_url(html, parser=None):
"""Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Find more than 1 forms to handle:\n%s', forms)
form = forms[0]
return form.get('action')
def parse_captcha_html(html, response_url):
parser = bs4.BeautifulSoup(html, 'html.parser')
captcha_sid = parser.find('input', {"name": "captcha_sid"}).get("value")
captcha_img = parser.find('img', {"id": "captcha"}).get("src")
captcha_url = urljoin(response_url, captcha_img)
return captcha_sid, captcha_url
def check_html_warnings(html, parser=None):
"""Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True
class VerboseHTTPSession(requests.Session):
"""HTTP session based on requests.Session with some extra logging
"""
def __init__(self):
super(VerboseHTTPSession, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def request(self, method, url, **kwargs):
self.logger.debug(
'Request: %s %s, params=%r, data=%r',
method, url, kwargs.get('params'), kwargs.get('data'))
response = super(VerboseHTTPSession, self).request(
method, url, **kwargs)
self.logger.debug(
'Response: %s %s', response.status_code, response.url)
return response
|
prawn-cake/vk-requests | vk_requests/utils.py | check_html_warnings | python | def check_html_warnings(html, parser=None):
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
# Check warnings
warnings = parser.find_all('div', {'class': 'service_msg_warning'})
if warnings:
raise VkPageWarningsError('; '.join([w.get_text() for w in warnings]))
return True | Check html warnings
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:raise VkPageWarningsError: in case of found warnings | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/utils.py#L122-L136 | null | # -*- coding: utf-8 -*-
import logging
from collections import Iterable
import bs4
import requests
import six
from vk_requests.exceptions import VkParseError, VkPageWarningsError
logger = logging.getLogger('vk-requests')
try:
# Python 2
from urllib import urlencode
from urlparse import urlparse, parse_qsl, urljoin
except ImportError:
# Python 3
from urllib.parse import urlparse, parse_qsl, urlencode, urljoin
def stringify_values(data):
"""Coerce iterable values to 'val1,val2,valN'
Example:
fields=['nickname', 'city', 'can_see_all_posts']
--> fields='nickname,city,can_see_all_posts'
:param data: dict
:return: converted values dict
"""
if not isinstance(data, dict):
raise ValueError('Data must be dict. %r is passed' % data)
values_dict = {}
for key, value in data.items():
items = []
if isinstance(value, six.string_types):
items.append(value)
elif isinstance(value, Iterable):
for v in value:
# Convert to str int values
if isinstance(v, int):
v = str(v)
try:
item = six.u(v)
except TypeError:
item = v
items.append(item)
value = ','.join(items)
values_dict[key] = value
return values_dict
def parse_url_query_params(url, fragment=True):
"""Parse url query params
:param fragment: bool: flag is used for parsing oauth url
:param url: str: url string
:return: dict
"""
parsed_url = urlparse(url)
if fragment:
url_query = parse_qsl(parsed_url.fragment)
else:
url_query = parse_qsl(parsed_url.query)
# login_response_url_query can have multiple key
url_query = dict(url_query)
return url_query
def parse_form_action_url(html, parser=None):
"""Parse <form action="(.+)"> url
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: url str: for example: /login.php?act=security_check&to=&hash=12346
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
forms = parser.find_all('form')
if not forms:
raise VkParseError('Action form is not found in the html \n%s' % html)
if len(forms) > 1:
raise VkParseError('Find more than 1 forms to handle:\n%s', forms)
form = forms[0]
return form.get('action')
def parse_captcha_html(html, response_url):
parser = bs4.BeautifulSoup(html, 'html.parser')
captcha_sid = parser.find('input', {"name": "captcha_sid"}).get("value")
captcha_img = parser.find('img', {"id": "captcha"}).get("src")
captcha_url = urljoin(response_url, captcha_img)
return captcha_sid, captcha_url
def parse_masked_phone_number(html, parser=None):
"""Get masked phone number from security check html
:param html: str: raw html text
:param parser: bs4.BeautifulSoup: html parser
:return: tuple of phone prefix and suffix, for example: ('+1234', '89')
:rtype : tuple
"""
if parser is None:
parser = bs4.BeautifulSoup(html, 'html.parser')
fields = parser.find_all('span', {'class': 'field_prefix'})
if not fields:
raise VkParseError(
'No <span class="field_prefix">...</span> in the \n%s' % html)
result = []
for f in fields:
value = f.get_text().replace(six.u('\xa0'), '')
result.append(value)
return tuple(result)
class VerboseHTTPSession(requests.Session):
"""HTTP session based on requests.Session with some extra logging
"""
def __init__(self):
super(VerboseHTTPSession, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
def request(self, method, url, **kwargs):
self.logger.debug(
'Request: %s %s, params=%r, data=%r',
method, url, kwargs.get('params'), kwargs.get('data'))
response = super(VerboseHTTPSession, self).request(
method, url, **kwargs)
self.logger.debug(
'Response: %s %s', response.status_code, response.url)
return response
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.http_session | python | def http_session(self):
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session | HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L63-L72 | null | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
    def make_request(self, request, captcha_response=None):
        """Make api request helper function
        Recursively retries the call when VK reports a captcha challenge or
        a stale access token; any other VK error is raised to the caller.
        :param request: vk_requests.api.Request instance
        :param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
        :return: dict: json decoded http response; NOTE(review): implicitly
            returns None when the payload contains none of the expected keys
        """
        logger.debug('Prepare API Method request %r', request)
        response = self._send_api_request(request=request,
                                          captcha_response=captcha_response)
        response.raise_for_status()
        response_or_error = json.loads(response.text)
        logger.debug('response: %s', response_or_error)
        if 'error' in response_or_error:
            error_data = response_or_error['error']
            vk_error = VkAPIError(error_data)
            if vk_error.is_captcha_needed():
                captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
                if not captcha_key:
                    raise vk_error
                # Retry http request with captcha info attached
                captcha_response = {
                    'sid': vk_error.captcha_sid,
                    'key': captcha_key,
                }
                return self.make_request(
                    request, captcha_response=captcha_response)
            elif vk_error.is_access_token_incorrect():
                logger.info(
                    'Authorization failed. Access token will be dropped')
                # Drop the cached token; the retry re-fetches it lazily via
                # the access_token property inside _send_api_request.
                self._access_token = None
                return self.make_request(request)
            else:
                raise vk_error
        elif 'execute_errors' in response_or_error:
            # can take place while running .execute vk method
            # See more: https://vk.com/dev/execute
            raise VkAPIError(response_or_error['execute_errors'][0])
        elif 'response' in response_or_error:
            return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
    def __repr__(self):  # pragma: no cover
        # Debug representation; note it embeds the raw access token, so
        # avoid logging repr() of sessions in shared output.
        return "%s(api_url='%s', access_token='%s')" % (
            self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.do_login | python | def do_login(self, http_session):
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message) | Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L85-L134 | [
"def parse_url_query_params(url, fragment=True):\n \"\"\"Parse url query params\n\n :param fragment: bool: flag is used for parsing oauth url\n :param url: str: url string\n :return: dict\n \"\"\"\n parsed_url = urlparse(url)\n if fragment:\n url_query = parse_qsl(parsed_url.fragment)\n else:\n url_query = parse_qsl(parsed_url.query)\n # login_response_url_query can have multiple key\n url_query = dict(url_query)\n return url_query\n",
"def parse_form_action_url(html, parser=None):\n \"\"\"Parse <form action=\"(.+)\"> url\n\n :param html: str: raw html text\n :param parser: bs4.BeautifulSoup: html parser\n :return: url str: for example: /login.php?act=security_check&to=&hash=12346\n \"\"\"\n if parser is None:\n parser = bs4.BeautifulSoup(html, 'html.parser')\n\n forms = parser.find_all('form')\n if not forms:\n raise VkParseError('Action form is not found in the html \\n%s' % html)\n if len(forms) > 1:\n raise VkParseError('Find more than 1 forms to handle:\\n%s', forms)\n form = forms[0]\n return form.get('action')\n",
"def require_2fa(self, html, http_session):\n logger.info(\n 'User enabled 2 factors authentication. Auth check code is needed '\n '(SMS, Google Authenticator or one-time password generated by vk)')\n action_url = parse_form_action_url(html)\n auth_check_code = self.get_2fa_code()\n auth_check_data = {\n 'code': auth_check_code,\n '_ajax': '1',\n 'remember': '1'\n }\n url = '/'.join([self.LOGIN_URL + action_url])\n response = http_session.post(url=url, data=auth_check_data)\n return response\n",
"def require_auth_captcha(self, response, query_params,\n login_form_data, http_session):\n \"\"\"Resolve auth captcha case\n\n :param response: http response\n :param query_params: dict: response query params, for example:\n {'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}\n\n :param login_form_data: dict\n :param http_session: requests.Session\n :return: :raise VkAuthError:\n \"\"\"\n logger.info('Captcha is needed. Query params: %s', query_params)\n form_text = response.text\n\n action_url = parse_form_action_url(form_text)\n logger.debug('form action url: %s', action_url)\n if not action_url:\n raise VkAuthError('Cannot find form action url')\n\n captcha_sid, captcha_url = parse_captcha_html(\n html=response.text, response_url=response.url)\n logger.info('Captcha url %s', captcha_url)\n\n login_form_data['captcha_sid'] = captcha_sid\n login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)\n\n response = http_session.post(action_url, login_form_data)\n return response\n",
"def require_phone_number(self, html, session):\n logger.info(\n 'Auth requires phone number. You do login from unusual place')\n\n # Raises VkPageWarningsError in case of warnings\n # NOTE: we check only 'security_check' case on warnings for now\n # in future it might be extended for other cases as well\n check_html_warnings(html=html)\n\n # Determine form action url\n action_url = parse_form_action_url(html)\n\n # Get masked phone from html to make things more clear\n phone_prefix, phone_suffix = parse_masked_phone_number(html)\n\n if self._phone_number:\n code = self._phone_number[len(phone_prefix):-len(phone_suffix)]\n else:\n if self.interactive:\n prompt = 'Enter missing digits of your phone number (%s****%s): '\\\n % (phone_prefix, phone_suffix)\n code = raw_input(prompt)\n else:\n raise VkAuthError(\n 'Phone number is required. Create an API instance using '\n 'phone_number parameter or use interactive mode')\n\n params = parse_url_query_params(action_url, fragment=False)\n auth_data = {\n 'code': code,\n 'act': 'security_check',\n 'hash': params['hash']}\n response = session.post(\n url=self.LOGIN_URL + action_url, data=auth_data)\n logger.debug('require_phone_number resp: %s', response.text)\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
    def do_direct_authorization(self, session):
        """ Direct Authorization, more info: https://vk.com/dev/auth_direct
        :param session: http session used for the token request
        :return: dict: json-decoded token response (contains 'access_token')
        :raise VkAuthError: on non-JSON response or unrecoverable VK error
        """
        logger.info('Doing direct authorization, app_id=%s', self.app_id)
        auth_data = {
            'client_id': self.app_id,
            'client_secret': self._client_secret,
            'username': self._login,
            'password': self._password,
            'grant_type': 'password',
            '2fa_supported': self._two_fa_supported,
            'scope': self.scope,
            'v': self.api_version
        }
        response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                                data=stringify_values(auth_data))
        try:
            response_json = response.json()
        except ValueError:  # not JSON in response
            error_message = 'OAuth2 grant access error'
            logger.error(response.text)
            raise VkAuthError(error_message)
        else:
            if 'access_token' in response_json:
                return response_json
            # Recoverable errors: VK asked for a 2FA code or a captcha
            if response_json['error'] == 'need_validation':
                return self.direct_auth_require_2fa(session, auth_data)
            elif response_json['error'] == 'need_captcha':
                return self.direct_auth_require_captcha(session, response_json, auth_data)
            else:
                error_message = 'VK error: [{}] {}'.format(
                    response_json['error'], response_json['error_description'])
                raise VkAuthError(error_message)
    def direct_auth_require_2fa(self, session, auth_data):
        """Resolve the 'need_validation' case of direct authorization.
        :param session: http session
        :param auth_data: dict: original auth payload; extended in place
            with 'force_sms' / 'code' keys
        :return: dict: json-decoded retry response
        :raise VkAuthError: when the retry response is not JSON
        """
        if self._two_fa_force_sms:
            # Repeating the request with force_sms makes VK deliver the code
            # via SMS instead of the app; this response itself is discarded.
            auth_data['force_sms'] = self._two_fa_force_sms
            session.post(url=self.DIRECT_AUTHORIZE_URL,
                         data=stringify_values(auth_data))
        logger.info(
            'User enabled 2 factors authentication. Auth check code is needed '
            '(SMS, Google Authenticator or one-time password generated by vk)')
        auth_data['code'] = self.get_2fa_code()
        response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                                data=stringify_values(auth_data))
        try:
            response_json = response.json()
        except ValueError:  # not JSON in response
            error_message = 'OAuth2 grant access error'
            logger.error(response.text)
            raise VkAuthError(error_message)
        return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.do_implicit_flow_authorization | python | def do_implicit_flow_authorization(self, session):
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message) | Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L136-L177 | [
"def parse_url_query_params(url, fragment=True):\n \"\"\"Parse url query params\n\n :param fragment: bool: flag is used for parsing oauth url\n :param url: str: url string\n :return: dict\n \"\"\"\n parsed_url = urlparse(url)\n if fragment:\n url_query = parse_qsl(parsed_url.fragment)\n else:\n url_query = parse_qsl(parsed_url.query)\n # login_response_url_query can have multiple key\n url_query = dict(url_query)\n return url_query\n",
"def parse_form_action_url(html, parser=None):\n \"\"\"Parse <form action=\"(.+)\"> url\n\n :param html: str: raw html text\n :param parser: bs4.BeautifulSoup: html parser\n :return: url str: for example: /login.php?act=security_check&to=&hash=12346\n \"\"\"\n if parser is None:\n parser = bs4.BeautifulSoup(html, 'html.parser')\n\n forms = parser.find_all('form')\n if not forms:\n raise VkParseError('Action form is not found in the html \\n%s' % html)\n if len(forms) > 1:\n raise VkParseError('Find more than 1 forms to handle:\\n%s', forms)\n form = forms[0]\n return form.get('action')\n",
"def stringify_values(data):\n \"\"\"Coerce iterable values to 'val1,val2,valN'\n\n Example:\n fields=['nickname', 'city', 'can_see_all_posts']\n --> fields='nickname,city,can_see_all_posts'\n\n :param data: dict\n :return: converted values dict\n \"\"\"\n if not isinstance(data, dict):\n raise ValueError('Data must be dict. %r is passed' % data)\n\n values_dict = {}\n for key, value in data.items():\n items = []\n if isinstance(value, six.string_types):\n items.append(value)\n elif isinstance(value, Iterable):\n for v in value:\n # Convert to str int values\n if isinstance(v, int):\n v = str(v)\n try:\n item = six.u(v)\n except TypeError:\n item = v\n items.append(item)\n value = ','.join(items)\n values_dict[key] = value\n return values_dict\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
    def __init__(self, app_id=None, user_login=None, user_password=None,
                 phone_number=None, scope='offline', api_version=None,
                 interactive=False, service_token=None, client_secret=None,
                 two_fa_supported=False, two_fa_force_sms=False):
        """IMPORTANT: (app_id + user_login + user_password) and service_token
        are mutually exclusive
        :param app_id: VK application id
        :param user_login: account login (email/phone)
        :param user_password: account password
        :param phone_number: full phone number, used to answer the
            'security_check' challenge without prompting
        :param scope: requested permission scope (str or iterable)
        :param api_version: VK API version sent with every request
        :param interactive: bool: allow prompting on stdin for captcha keys,
            2FA codes and missing phone digits
        :param service_token: ready-made service token (no login performed)
        :param client_secret: enables the direct authorization flow
        :param two_fa_supported: advertise 2FA support in direct auth
        :param two_fa_force_sms: request the 2FA code via SMS
        """
        self.app_id = app_id
        self._login = user_login
        self._password = user_password
        self._phone_number = phone_number
        self._service_token = service_token
        self.scope = scope
        self.interactive = interactive
        self._access_token = None
        self._api_version = api_version
        self._client_secret = client_secret
        self._two_fa_supported = two_fa_supported
        self._two_fa_force_sms = two_fa_force_sms
        # requests.Session subclass instance
        self._http_session = None
        # Some API methods get args (e.g. user id) from access token.
        # If we define user login, we need get access token now.
        if self._login:
            self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
    @property
    def api_version(self):
        """VK API version string given at construction time (may be None)."""
        return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
    def do_login(self, http_session):
        """Do vk login
        Fetches the mobile login form, submits credentials, resolves any
        challenge VK raises (captcha, 2FA, security check), then verifies
        the session cookies.
        :param http_session: vk_requests.utils.VerboseHTTPSession: http session
        :return: True when a VK web session was established
        :raise VkParseError: when the login form cannot be parsed
        :raise VkAuthError: when credentials or the auth code are rejected
        """
        response = http_session.get(self.LOGIN_URL)
        action_url = parse_form_action_url(response.text)
        # Stop login it action url is not found
        if not action_url:
            logger.debug(response.text)
            raise VkParseError("Can't parse form action url")
        login_form_data = {'email': self._login, 'pass': self._password}
        login_response = http_session.post(action_url, login_form_data)
        logger.debug('Cookies: %s', http_session.cookies)
        response_url_query = parse_url_query_params(
            login_response.url, fragment=False)
        logger.debug('response_url_query: %s', response_url_query)
        act = response_url_query.get('act')
        # Check response url query params firstly
        if 'sid' in response_url_query:
            # Captcha challenge
            self.require_auth_captcha(
                response=login_response,
                query_params=response_url_query,
                login_form_data=login_form_data,
                http_session=http_session)
        elif act == 'authcheck':
            # Two-factor authentication challenge
            self.require_2fa(html=login_response.text,
                             http_session=http_session)
        elif act == 'security_check':
            # Phone-number confirmation challenge
            self.require_phone_number(html=login_response.text,
                                      session=http_session)
        # A 'remixsid' cookie (or its v6 variant) marks a logged-in session
        session_cookies = ('remixsid' in http_session.cookies,
                           'remixsid6' in http_session.cookies)
        if any(session_cookies):
            logger.info('VK session is established')
            return True
        else:
            message = 'Authorization error: incorrect password or ' \
                      'authentication code'
            logger.error(message)
            raise VkAuthError(message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
    def require_auth_captcha(self, response, query_params,
                             login_form_data, http_session):
        """Resolve auth captcha case
        :param response: http response
        :param query_params: dict: response query params, for example:
        {'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
        :param login_form_data: dict: original login payload; extended in
            place with 'captcha_sid' / 'captcha_key'
        :param http_session: requests.Session
        :return: :raise VkAuthError:
        """
        logger.info('Captcha is needed. Query params: %s', query_params)
        form_text = response.text
        action_url = parse_form_action_url(form_text)
        logger.debug('form action url: %s', action_url)
        if not action_url:
            raise VkAuthError('Cannot find form action url')
        # Extract captcha sid/image url from the html
        captcha_sid, captcha_url = parse_captcha_html(
            html=response.text, response_url=response.url)
        logger.info('Captcha url %s', captcha_url)
        # Re-submit the login form together with the user's captcha answer
        login_form_data['captcha_sid'] = captcha_sid
        login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
        response = http_session.post(action_url, login_form_data)
        return response
    def require_phone_number(self, html, session):
        """Answer the 'security_check' challenge with missing phone digits.
        :param html: str: html of the security check page
        :param session: http session carrying the login cookies
        :raise VkAuthError: when no phone number is configured and the
            session is non-interactive
        """
        logger.info(
            'Auth requires phone number. You do login from unusual place')
        # Raises VkPageWarningsError in case of warnings
        # NOTE: we check only 'security_check' case on warnings for now
        # in future it might be extended for other cases as well
        check_html_warnings(html=html)
        # Determine form action url
        action_url = parse_form_action_url(html)
        # Get masked phone from html to make things more clear
        phone_prefix, phone_suffix = parse_masked_phone_number(html)
        if self._phone_number:
            # NOTE(review): assumes phone_suffix is non-empty; an empty
            # suffix would make this slice yield '' -- confirm upstream.
            code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
        else:
            if self.interactive:
                prompt = 'Enter missing digits of your phone number (%s****%s): '\
                         % (phone_prefix, phone_suffix)
                code = raw_input(prompt)
            else:
                raise VkAuthError(
                    'Phone number is required. Create an API instance using '
                    'phone_number parameter or use interactive mode')
        params = parse_url_query_params(action_url, fragment=False)
        auth_data = {
            'code': code,
            'act': 'security_check',
            'hash': params['hash']}
        response = session.post(
            url=self.LOGIN_URL + action_url, data=auth_data)
        logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.do_direct_authorization | python | def do_direct_authorization(self, session):
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message) | Direct Authorization, more info: https://vk.com/dev/auth_direct | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L179-L211 | [
"def stringify_values(data):\n \"\"\"Coerce iterable values to 'val1,val2,valN'\n\n Example:\n fields=['nickname', 'city', 'can_see_all_posts']\n --> fields='nickname,city,can_see_all_posts'\n\n :param data: dict\n :return: converted values dict\n \"\"\"\n if not isinstance(data, dict):\n raise ValueError('Data must be dict. %r is passed' % data)\n\n values_dict = {}\n for key, value in data.items():\n items = []\n if isinstance(value, six.string_types):\n items.append(value)\n elif isinstance(value, Iterable):\n for v in value:\n # Convert to str int values\n if isinstance(v, int):\n v = str(v)\n try:\n item = six.u(v)\n except TypeError:\n item = v\n items.append(item)\n value = ','.join(items)\n values_dict[key] = value\n return values_dict\n",
"def direct_auth_require_2fa(self, session, auth_data):\n if self._two_fa_force_sms:\n auth_data['force_sms'] = self._two_fa_force_sms\n session.post(url=self.DIRECT_AUTHORIZE_URL,\n data=stringify_values(auth_data))\n logger.info(\n 'User enabled 2 factors authentication. Auth check code is needed '\n '(SMS, Google Authenticator or one-time password generated by vk)')\n auth_data['code'] = self.get_2fa_code()\n response = session.post(url=self.DIRECT_AUTHORIZE_URL,\n data=stringify_values(auth_data))\n try:\n response_json = response.json()\n except ValueError: # not JSON in response\n error_message = 'OAuth2 grant access error'\n logger.error(response.text)\n raise VkAuthError(error_message)\n return response_json\n",
"def direct_auth_require_captcha(self, session, response, auth_data):\n logger.info('Captcha is needed. Response: %s', response)\n\n captcha_url = response['captcha_img']\n logger.info('Captcha url %s', captcha_url)\n\n auth_data['captcha_sid'] = response['captcha_sid']\n auth_data['captcha_key'] = self.get_captcha_key(captcha_url)\n\n response = session.post(url=self.DIRECT_AUTHORIZE_URL,\n data=stringify_values(auth_data))\n try:\n response_json = response.json()\n except ValueError: # not JSON in response\n error_message = 'OAuth2 grant access error'\n logger.error(response.text)\n raise VkAuthError(error_message)\n return response_json\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.require_auth_captcha | python | def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response | Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError: | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L266-L294 | [
"def parse_form_action_url(html, parser=None):\n \"\"\"Parse <form action=\"(.+)\"> url\n\n :param html: str: raw html text\n :param parser: bs4.BeautifulSoup: html parser\n :return: url str: for example: /login.php?act=security_check&to=&hash=12346\n \"\"\"\n if parser is None:\n parser = bs4.BeautifulSoup(html, 'html.parser')\n\n forms = parser.find_all('form')\n if not forms:\n raise VkParseError('Action form is not found in the html \\n%s' % html)\n if len(forms) > 1:\n raise VkParseError('Find more than 1 forms to handle:\\n%s', forms)\n form = forms[0]\n return form.get('action')\n",
"def parse_captcha_html(html, response_url):\n parser = bs4.BeautifulSoup(html, 'html.parser')\n captcha_sid = parser.find('input', {\"name\": \"captcha_sid\"}).get(\"value\")\n captcha_img = parser.find('img', {\"id\": \"captcha\"}).get(\"src\")\n captcha_url = urljoin(response_url, captcha_img)\n return captcha_sid, captcha_url\n",
"def get_captcha_key(self, captcha_image_url):\n \"\"\"Read CAPTCHA key from user input\"\"\"\n\n if self.interactive:\n print('Open CAPTCHA image url in your browser and enter it below: ',\n captcha_image_url)\n captcha_key = raw_input('Enter CAPTCHA key: ')\n return captcha_key\n else:\n raise VkAuthError(\n 'Captcha is required. Use interactive mode to enter it '\n 'manually')\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession._get_access_token | python | def _get_access_token(self):
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params) | Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L348-L377 | null | class VKSession(object):
# Endpoints used across the authorization and API flows below.
API_URL = 'https://api.vk.com/method/'  # base url for API method calls
DEFAULT_HTTP_HEADERS = {
    # default headers applied to every request made via http_session
    'Accept': 'application/json',
    'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'  # mobile login form (used by do_login)
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'  # OAuth2 implicit flow
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'  # OAuth2 direct auth
CAPTCHA_URI = 'https://api.vk.com/captcha.php'  # not referenced in this chunk
def __init__(self, app_id=None, user_login=None, user_password=None,
             phone_number=None, scope='offline', api_version=None,
             interactive=False, service_token=None, client_secret=None,
             two_fa_supported=False, two_fa_force_sms=False):
    """IMPORTANT: (app_id + user_login + user_password) and service_token
    are mutually exclusive

    :param app_id: VK application id (credentials-based auth)
    :param user_login: account login
    :param user_password: account password
    :param phone_number: full phone number; used to derive the missing
        digits automatically during the 'security check' challenge
    :param scope: OAuth2 access scope posted during authorization
    :param api_version: value sent as the 'v' parameter with requests
    :param interactive: when True, helpers may prompt the user on stdin
        for captcha keys, 2FA codes and phone digits
    :param service_token: ready-made service token; when set it is
        returned as-is instead of performing authorization
    :param client_secret: when set, the direct authorization flow is
        used instead of form login + implicit flow
    :param two_fa_supported: sent as '2fa_supported' in direct auth
    :param two_fa_force_sms: adds 'force_sms' on the direct-auth 2FA
        retry so the code is delivered via SMS
    """
    self.app_id = app_id
    self._login = user_login
    self._password = user_password
    self._phone_number = phone_number
    self._service_token = service_token
    self.scope = scope
    self.interactive = interactive
    self._access_token = None  # cached token; filled lazily
    self._api_version = api_version
    self._client_secret = client_secret
    self._two_fa_supported = two_fa_supported
    self._two_fa_force_sms = two_fa_force_sms
    # requests.Session subclass instance
    self._http_session = None
    # Some API methods get args (e.g. user id) from access token.
    # If we define user login, we need get access token now.
    if self._login:
        self.renew_access_token()
@property
def http_session(self):
    """HTTP Session property

    Lazily builds a VerboseHTTPSession with the default headers and
    caches it on the instance for reuse.

    :return: vk_requests.utils.VerboseHTTPSession instance
    """
    session = self._http_session
    if session is None:
        session = VerboseHTTPSession()
        session.headers.update(self.DEFAULT_HTTP_HEADERS)
        self._http_session = session
    return session
@property
def api_version(self):
    """VK API version string sent as the 'v' request parameter."""
    version = self._api_version
    return version
def is_token_required(self):
    """Helper method for vk_requests.auth.VKSession initialization

    True when any user credential (app id, login or password) is set,
    meaning an access token must be obtained via authorization.

    :return: bool
    """
    credentials = (self.app_id, self._login, self._password)
    return any(credentials)
def do_login(self, http_session):
    """Do vk login via the mobile login form.

    Handles the optional captcha / 2FA / security-check challenges and
    verifies success by the presence of a 'remixsid' session cookie.

    :param http_session: vk_requests.utils.VerboseHTTPSession: http session
    :return: True on success
    :raise VkParseError: when the login form cannot be parsed
    :raise VkAuthError: when no session cookie appears after login
    """
    response = http_session.get(self.LOGIN_URL)
    action_url = parse_form_action_url(response.text)
    # Stop login if action url is not found
    if not action_url:
        logger.debug(response.text)
        raise VkParseError("Can't parse form action url")
    login_form_data = {'email': self._login, 'pass': self._password}
    login_response = http_session.post(action_url, login_form_data)
    logger.debug('Cookies: %s', http_session.cookies)
    response_url_query = parse_url_query_params(
        login_response.url, fragment=False)
    logger.debug('response_url_query: %s', response_url_query)
    act = response_url_query.get('act')
    # Check response url query params firstly
    if 'sid' in response_url_query:
        # Captcha challenge: 'sid' query param carries the captcha id
        self.require_auth_captcha(
            response=login_response,
            query_params=response_url_query,
            login_form_data=login_form_data,
            http_session=http_session)
    elif act == 'authcheck':
        # Two-factor authentication challenge
        self.require_2fa(html=login_response.text,
                         http_session=http_session)
    elif act == 'security_check':
        # Phone-number confirmation challenge
        self.require_phone_number(html=login_response.text,
                                  session=http_session)
    # Either cookie name indicates an established VK web session
    session_cookies = ('remixsid' in http_session.cookies,
                       'remixsid6' in http_session.cookies)
    if any(session_cookies):
        logger.info('VK session is established')
        return True
    else:
        message = 'Authorization error: incorrect password or ' \
                  'authentication code'
        logger.error(message)
        raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
    """ Standard OAuth2 authorization method. It's used for getting access token
    More info: https://vk.com/dev/implicit_flow_user

    :param session: http session with login cookies already set
    :return: dict: query params parsed from the redirect url; contains
        'access_token' on success
    :raise VkAuthError: when the grant cannot be completed
    """
    logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
    auth_data = {
        'client_id': self.app_id,
        'display': 'mobile',
        'response_type': 'token',
        'scope': self.scope,
        'redirect_uri': 'https://oauth.vk.com/blank.html',
        'v': self.api_version
    }
    response = session.post(url=self.AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    url_query_params = parse_url_query_params(response.url)
    if 'expires_in' in url_query_params:
        logger.info('Token will be expired in %s sec.' %
                    url_query_params['expires_in'])
    if 'access_token' in url_query_params:
        return url_query_params
    # Permissions are needed
    logger.info('Getting permissions')
    action_url = parse_form_action_url(response.text)
    logger.debug('Response form action: %s', action_url)
    if action_url:
        # Confirm the permissions form; the token arrives in the
        # redirect url of the follow-up GET
        response = session.get(action_url)
        url_query_params = parse_url_query_params(response.url)
        return url_query_params
    # No form and no token: build an error message from the JSON body
    # when there is one
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
    else:
        error_message = 'VK error: [{}] {}'.format(
            response_json['error'], response_json['error_description'])
    # NOTE(review): 'Permissions obtained' is logged at error level on a
    # failure path right before raising -- looks like a leftover; confirm
    logger.error('Permissions obtained')
    raise VkAuthError(error_message)
def do_direct_authorization(self, session):
    """ Direct Authorization, more info: https://vk.com/dev/auth_direct

    Requires ``client_secret``. Delegates to the 2FA / captcha helpers
    when VK responds with 'need_validation' / 'need_captcha'.

    :param session: http session
    :return: dict: decoded JSON token response (contains 'access_token')
    :raise VkAuthError: on a non-JSON response or any other VK error
    """
    logger.info('Doing direct authorization, app_id=%s', self.app_id)
    auth_data = {
        'client_id': self.app_id,
        'client_secret': self._client_secret,
        'username': self._login,
        'password': self._password,
        'grant_type': 'password',
        '2fa_supported': self._two_fa_supported,
        'scope': self.scope,
        'v': self.api_version
    }
    response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
        raise VkAuthError(error_message)
    else:
        if 'access_token' in response_json:
            return response_json
        if response_json['error'] == 'need_validation':
            # 2FA required; retry with a user-supplied code
            return self.direct_auth_require_2fa(session, auth_data)
        elif response_json['error'] == 'need_captcha':
            # Captcha required; retry with a solved captcha
            return self.direct_auth_require_captcha(session, response_json, auth_data)
        else:
            error_message = 'VK error: [{}] {}'.format(
                response_json['error'], response_json['error_description'])
            raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
    """Retry direct authorization with a user-supplied 2FA code.

    :param session: http session
    :param auth_data: dict: direct-auth payload; mutated in place
        ('force_sms' / 'code' keys are added)
    :return: dict: decoded JSON token response
    :raise VkAuthError: when the retry response is not JSON
    """
    if self._two_fa_force_sms:
        # Fire a throwaway request so VK delivers the code by SMS
        auth_data['force_sms'] = self._two_fa_force_sms
        session.post(url=self.DIRECT_AUTHORIZE_URL,
                     data=stringify_values(auth_data))
    logger.info(
        'User enabled 2 factors authentication. Auth check code is needed '
        '(SMS, Google Authenticator or one-time password generated by vk)')
    auth_data['code'] = self.get_2fa_code()
    response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
        raise VkAuthError(error_message)
    return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
    """Retry direct authorization with a solved captcha.

    :param session: http session
    :param response: dict: VK 'need_captcha' error payload with
        'captcha_img' and 'captcha_sid' keys
    :param auth_data: dict: direct-auth payload; mutated in place
    :return: dict: decoded JSON token response
    :raise VkAuthError: when the retry response is not JSON
    """
    logger.info('Captcha is needed. Response: %s', response)
    captcha_url = response['captcha_img']
    logger.info('Captcha url %s', captcha_url)
    auth_data['captcha_sid'] = response['captcha_sid']
    auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
    # `response` is rebound from the error payload to the retry response
    response = session.post(url=self.DIRECT_AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
        raise VkAuthError(error_message)
    return response_json
def require_2fa(self, html, http_session):
    """Pass the form-login 2FA challenge with a user-supplied code.

    :param html: str: login response page containing the authcheck form
    :param http_session: http session used for the login flow
    :return: HTTP response of the auth-check POST
    """
    logger.info(
        'User enabled 2 factors authentication. Auth check code is needed '
        '(SMS, Google Authenticator or one-time password generated by vk)')
    action_url = parse_form_action_url(html)
    auth_check_code = self.get_2fa_code()
    auth_check_data = {
        'code': auth_check_code,
        '_ajax': '1',
        'remember': '1'
    }
    # '/'.join([...]) over a single-element list was a no-op wrapper;
    # concatenate directly (same resulting url)
    url = self.LOGIN_URL + action_url
    response = http_session.post(url=url, data=auth_check_data)
    return response
def require_auth_captcha(self, response, query_params,
                         login_form_data, http_session):
    """Resolve auth captcha case

    :param response: http response
    :param query_params: dict: response query params, for example:
    {'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}

    :param login_form_data: dict: mutated in place (captcha fields added)
    :param http_session: requests.Session
    :return: :raise VkAuthError:
    """
    logger.info('Captcha is needed. Query params: %s', query_params)
    form_text = response.text
    action_url = parse_form_action_url(form_text)
    logger.debug('form action url: %s', action_url)
    if not action_url:
        raise VkAuthError('Cannot find form action url')
    # Extract captcha id and image url from the login page markup
    captcha_sid, captcha_url = parse_captcha_html(
        html=response.text, response_url=response.url)
    logger.info('Captcha url %s', captcha_url)
    login_form_data['captcha_sid'] = captcha_sid
    login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
    # Re-submit the login form with the captcha answer attached
    response = http_session.post(action_url, login_form_data)
    return response
def require_phone_number(self, html, session):
    """Pass the 'security check' challenge by confirming phone digits.

    Uses the configured phone number when available, otherwise prompts
    the user in interactive mode.

    :param html: str: login response page containing the check form
    :param session: http session used for the login flow
    :return: HTTP response of the security-check POST
    :raise VkAuthError: when no phone number is configured and
        interactive mode is off
    """
    logger.info(
        'Auth requires phone number. You do login from unusual place')
    # Raises VkPageWarningsError in case of warnings
    # NOTE: we check only 'security_check' case on warnings for now
    # in future it might be extended for other cases as well
    check_html_warnings(html=html)
    # Determine form action url
    action_url = parse_form_action_url(html)
    # Get masked phone from html to make things more clear
    phone_prefix, phone_suffix = parse_masked_phone_number(html)
    if self._phone_number:
        # Cut the middle digits out of the full number. A positive end
        # index avoids the empty-slice bug that `[start:-0]` produced
        # when phone_suffix is empty.
        end = len(self._phone_number) - len(phone_suffix)
        code = self._phone_number[len(phone_prefix):end]
    elif self.interactive:
        prompt = 'Enter missing digits of your phone number (%s****%s): '\
            % (phone_prefix, phone_suffix)
        code = raw_input(prompt)
    else:
        raise VkAuthError(
            'Phone number is required. Create an API instance using '
            'phone_number parameter or use interactive mode')
    params = parse_url_query_params(action_url, fragment=False)
    auth_data = {
        'code': code,
        'act': 'security_check',
        'hash': params['hash']}
    response = session.post(
        url=self.LOGIN_URL + action_url, data=auth_data)
    logger.debug('require_phone_number resp: %s', response.text)
    # Return the response for consistency with the other require_* helpers
    return response
def get_2fa_code(self):
    """Ask the user for the 2FA check code on stdin.

    :return: str: stripped code entered by the user
    :raise VkAuthError: when interactive mode is off
    """
    if not self.interactive:
        raise VkAuthError(
            'Auth check code is needed (SMS, Google Authenticator or '
            'one-time password). '
            'Use interactive mode to enter the code manually')
    code = raw_input('Auth check code: ')
    return code.strip()
@property
def access_token(self):
    """Cached OAuth2 access token; fetched lazily on first read."""
    token = self._access_token
    if token is None:
        token = self._get_access_token()
        self._access_token = token
    return token
def get_captcha_key(self, captcha_image_url):
    """Ask the user to solve a captcha and type the key on stdin.

    :param captcha_image_url: str: url of the captcha image
    :return: str: captcha key entered by the user
    :raise VkAuthError: when interactive mode is off
    """
    if not self.interactive:
        raise VkAuthError(
            'Captcha is required. Use interactive mode to enter it '
            'manually')
    print('Open CAPTCHA image url in your browser and enter it below: ',
          captcha_image_url)
    return raw_input('Enter CAPTCHA key: ')
def renew_access_token(self):
    """Unconditionally refresh the cached access token.

    Bypasses the lazy ``access_token`` caching and always performs a
    new authorization round-trip.
    """
    self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
    """Make api request helper function

    Retries with captcha data when VK asks for a captcha, and retries
    with a freshly obtained access token when the current one is
    rejected.

    :param request: vk_requests.api.Request instance
    :param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
    :return: dict: json decoded http response
    :raise VkAPIError: for execute errors and unhandled VK errors
    """
    logger.debug('Prepare API Method request %r', request)
    response = self._send_api_request(request=request,
                                      captcha_response=captcha_response)
    response.raise_for_status()
    response_or_error = json.loads(response.text)
    logger.debug('response: %s', response_or_error)
    if 'error' in response_or_error:
        error_data = response_or_error['error']
        vk_error = VkAPIError(error_data)
        if vk_error.is_captcha_needed():
            captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
            if not captcha_key:
                raise vk_error
            # Retry http request with captcha info attached
            captcha_response = {
                'sid': vk_error.captcha_sid,
                'key': captcha_key,
            }
            return self.make_request(
                request, captcha_response=captcha_response)
        elif vk_error.is_access_token_incorrect():
            logger.info(
                'Authorization failed. Access token will be dropped')
            # Dropping the cached token forces re-authorization on the
            # next read of the access_token property
            self._access_token = None
            return self.make_request(request)
        else:
            raise vk_error
    elif 'execute_errors' in response_or_error:
        # can take place while running .execute vk method
        # See more: https://vk.com/dev/execute
        raise VkAPIError(response_or_error['execute_errors'][0])
    elif 'response' in response_or_error:
        return response_or_error['response']
    # NOTE(review): falls through and returns None when the payload has
    # none of 'error'/'execute_errors'/'response' -- confirm intended
def _send_api_request(self, request, captcha_response=None):
    """Prepare and send HTTP API request

    :param request: vk_requests.api.Request instance
    :param captcha_response: None or dict with 'sid' and 'key' keys
    :return: HTTP response
    """
    url = self.API_URL + request.method_name
    # Prepare request arguments
    method_kwargs = {'v': self.api_version}
    # Merge the user-supplied method args (the original iterated a
    # one-element tuple here, which was a pointless loop)
    method_kwargs.update(stringify_values(request.method_args))
    if self.is_token_required() or self._service_token:
        # Auth api call if access_token hadn't been gotten earlier
        method_kwargs['access_token'] = self.access_token
    if captcha_response:
        method_kwargs['captcha_sid'] = captcha_response['sid']
        method_kwargs['captcha_key'] = captcha_response['key']
    http_params = dict(url=url,
                       data=method_kwargs,
                       **request.http_params)
    logger.debug('send_api_request:http_params: %s', http_params)
    response = self.http_session.post(**http_params)
    return response
def __repr__(self):  # pragma: no cover
    """Short debug representation showing the API url and raw token."""
    return "{0}(api_url='{1}', access_token='{2}')".format(
        type(self).__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.get_captcha_key | python | def get_captcha_key(self, captcha_image_url):
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually') | Read CAPTCHA key from user input | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L379-L390 | null | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
    """Get access token using app_id, login and password OR service token
    (service token docs: https://vk.com/dev/service_token)

    :return: str: access token
    :raise ValueError: when neither a service token nor the full
        app_id/login/password triple is configured
    :raise VkAuthError: when authorization does not yield a token
    """
    if self._service_token:
        # NOTE(review): this logs 5 stars plus everything after index 50
        # of the token, i.e. the token's tail in clear text -- confirm
        # this is the intended masking
        logger.info('Use service token: %s',
                    5 * '*' + self._service_token[50:])
        return self._service_token
    if not all([self.app_id, self._login, self._password]):
        raise ValueError(
            'app_id=%s, login=%s password=%s (masked) must be given'
            % (self.app_id, self._login,
               '*' * len(self._password) if self._password else 'None'))
    logger.info("Getting access token for user '%s'" % self._login)
    with self.http_session as s:
        if self._client_secret:
            # Direct authorization flow (client_secret configured)
            url_query_params = self.do_direct_authorization(session=s)
        else:
            # Form login followed by the OAuth2 implicit flow
            self.do_login(http_session=s)
            url_query_params = self.do_implicit_flow_authorization(session=s)
        logger.debug('url_query_params: %s', url_query_params)
        if 'access_token' in url_query_params:
            logger.info('Access token has been gotten')
            return url_query_params['access_token']
        else:
            raise VkAuthError('OAuth2 authorization error. Url params: %s'
                              % url_query_params)
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession.make_request | python | def make_request(self, request, captcha_response=None):
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response'] | Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L398-L442 | [
"def is_captcha_needed(self):\n return self.code == CAPTCHA_IS_NEEDED\n",
"def get_captcha_key(self, captcha_image_url):\n \"\"\"Read CAPTCHA key from user input\"\"\"\n\n if self.interactive:\n print('Open CAPTCHA image url in your browser and enter it below: ',\n captcha_image_url)\n captcha_key = raw_input('Enter CAPTCHA key: ')\n return captcha_key\n else:\n raise VkAuthError(\n 'Captcha is required. Use interactive mode to enter it '\n 'manually')\n",
"def make_request(self, request, captcha_response=None):\n \"\"\"Make api request helper function\n\n :param request: vk_requests.api.Request instance\n :param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}\n :return: dict: json decoded http response\n \"\"\"\n logger.debug('Prepare API Method request %r', request)\n response = self._send_api_request(request=request,\n captcha_response=captcha_response)\n response.raise_for_status()\n response_or_error = json.loads(response.text)\n logger.debug('response: %s', response_or_error)\n\n if 'error' in response_or_error:\n error_data = response_or_error['error']\n vk_error = VkAPIError(error_data)\n\n if vk_error.is_captcha_needed():\n captcha_key = self.get_captcha_key(vk_error.captcha_img_url)\n if not captcha_key:\n raise vk_error\n\n # Retry http request with captcha info attached\n captcha_response = {\n 'sid': vk_error.captcha_sid,\n 'key': captcha_key,\n }\n return self.make_request(\n request, captcha_response=captcha_response)\n\n elif vk_error.is_access_token_incorrect():\n logger.info(\n 'Authorization failed. Access token will be dropped')\n self._access_token = None\n return self.make_request(request)\n\n else:\n raise vk_error\n elif 'execute_errors' in response_or_error:\n # can take place while running .execute vk method\n # See more: https://vk.com/dev/execute\n raise VkAPIError(response_or_error['execute_errors'][0])\n elif 'response' in response_or_error:\n return response_or_error['response']\n",
"def _send_api_request(self, request, captcha_response=None):\n \"\"\"Prepare and send HTTP API request\n\n :param request: vk_requests.api.Request instance\n :param captcha_response: None or dict \n :return: HTTP response\n \"\"\"\n url = self.API_URL + request.method_name\n\n # Prepare request arguments\n method_kwargs = {'v': self.api_version}\n\n # Shape up the request data\n for values in (request.method_args,):\n method_kwargs.update(stringify_values(values))\n\n if self.is_token_required() or self._service_token:\n # Auth api call if access_token hadn't been gotten earlier\n method_kwargs['access_token'] = self.access_token\n\n if captcha_response:\n method_kwargs['captcha_sid'] = captcha_response['sid']\n method_kwargs['captcha_key'] = captcha_response['key']\n\n http_params = dict(url=url,\n data=method_kwargs,\n **request.http_params)\n logger.debug('send_api_request:http_params: %s', http_params)\n response = self.http_session.post(**http_params)\n return response\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def _send_api_request(self, request, captcha_response=None):
"""Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response
"""
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/session.py | VKSession._send_api_request | python | def _send_api_request(self, request, captcha_response=None):
url = self.API_URL + request.method_name
# Prepare request arguments
method_kwargs = {'v': self.api_version}
# Shape up the request data
for values in (request.method_args,):
method_kwargs.update(stringify_values(values))
if self.is_token_required() or self._service_token:
# Auth api call if access_token hadn't been gotten earlier
method_kwargs['access_token'] = self.access_token
if captcha_response:
method_kwargs['captcha_sid'] = captcha_response['sid']
method_kwargs['captcha_key'] = captcha_response['key']
http_params = dict(url=url,
data=method_kwargs,
**request.http_params)
logger.debug('send_api_request:http_params: %s', http_params)
response = self.http_session.post(**http_params)
return response | Prepare and send HTTP API request
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict
:return: HTTP response | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/session.py#L444-L473 | [
"def stringify_values(data):\n \"\"\"Coerce iterable values to 'val1,val2,valN'\n\n Example:\n fields=['nickname', 'city', 'can_see_all_posts']\n --> fields='nickname,city,can_see_all_posts'\n\n :param data: dict\n :return: converted values dict\n \"\"\"\n if not isinstance(data, dict):\n raise ValueError('Data must be dict. %r is passed' % data)\n\n values_dict = {}\n for key, value in data.items():\n items = []\n if isinstance(value, six.string_types):\n items.append(value)\n elif isinstance(value, Iterable):\n for v in value:\n # Convert to str int values\n if isinstance(v, int):\n v = str(v)\n try:\n item = six.u(v)\n except TypeError:\n item = v\n items.append(item)\n value = ','.join(items)\n values_dict[key] = value\n return values_dict\n",
"def is_token_required(self):\n \"\"\"Helper method for vk_requests.auth.VKSession initialization\n\n :return: bool\n \"\"\"\n return any([self.app_id, self._login, self._password])\n"
] | class VKSession(object):
API_URL = 'https://api.vk.com/method/'
DEFAULT_HTTP_HEADERS = {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded'
}
LOGIN_URL = 'https://m.vk.com'
AUTHORIZE_URL = 'https://oauth.vk.com/authorize'
DIRECT_AUTHORIZE_URL = 'https://oauth.vk.com/token'
CAPTCHA_URI = 'https://api.vk.com/captcha.php'
def __init__(self, app_id=None, user_login=None, user_password=None,
phone_number=None, scope='offline', api_version=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
"""IMPORTANT: (app_id + user_login + user_password) and service_token
are mutually exclusive
"""
self.app_id = app_id
self._login = user_login
self._password = user_password
self._phone_number = phone_number
self._service_token = service_token
self.scope = scope
self.interactive = interactive
self._access_token = None
self._api_version = api_version
self._client_secret = client_secret
self._two_fa_supported = two_fa_supported
self._two_fa_force_sms = two_fa_force_sms
# requests.Session subclass instance
self._http_session = None
# Some API methods get args (e.g. user id) from access token.
# If we define user login, we need get access token now.
if self._login:
self.renew_access_token()
@property
def http_session(self):
"""HTTP Session property
:return: vk_requests.utils.VerboseHTTPSession instance
"""
if self._http_session is None:
session = VerboseHTTPSession()
session.headers.update(self.DEFAULT_HTTP_HEADERS)
self._http_session = session
return self._http_session
@property
def api_version(self):
return self._api_version
def is_token_required(self):
"""Helper method for vk_requests.auth.VKSession initialization
:return: bool
"""
return any([self.app_id, self._login, self._password])
def do_login(self, http_session):
"""Do vk login
:param http_session: vk_requests.utils.VerboseHTTPSession: http session
"""
response = http_session.get(self.LOGIN_URL)
action_url = parse_form_action_url(response.text)
# Stop login it action url is not found
if not action_url:
logger.debug(response.text)
raise VkParseError("Can't parse form action url")
login_form_data = {'email': self._login, 'pass': self._password}
login_response = http_session.post(action_url, login_form_data)
logger.debug('Cookies: %s', http_session.cookies)
response_url_query = parse_url_query_params(
login_response.url, fragment=False)
logger.debug('response_url_query: %s', response_url_query)
act = response_url_query.get('act')
# Check response url query params firstly
if 'sid' in response_url_query:
self.require_auth_captcha(
response=login_response,
query_params=response_url_query,
login_form_data=login_form_data,
http_session=http_session)
elif act == 'authcheck':
self.require_2fa(html=login_response.text,
http_session=http_session)
elif act == 'security_check':
self.require_phone_number(html=login_response.text,
session=http_session)
session_cookies = ('remixsid' in http_session.cookies,
'remixsid6' in http_session.cookies)
if any(session_cookies):
logger.info('VK session is established')
return True
else:
message = 'Authorization error: incorrect password or ' \
'authentication code'
logger.error(message)
raise VkAuthError(message)
def do_implicit_flow_authorization(self, session):
""" Standard OAuth2 authorization method. It's used for getting access token
More info: https://vk.com/dev/implicit_flow_user
"""
logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'display': 'mobile',
'response_type': 'token',
'scope': self.scope,
'redirect_uri': 'https://oauth.vk.com/blank.html',
'v': self.api_version
}
response = session.post(url=self.AUTHORIZE_URL,
data=stringify_values(auth_data))
url_query_params = parse_url_query_params(response.url)
if 'expires_in' in url_query_params:
logger.info('Token will be expired in %s sec.' %
url_query_params['expires_in'])
if 'access_token' in url_query_params:
return url_query_params
# Permissions are needed
logger.info('Getting permissions')
action_url = parse_form_action_url(response.text)
logger.debug('Response form action: %s', action_url)
if action_url:
response = session.get(action_url)
url_query_params = parse_url_query_params(response.url)
return url_query_params
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
logger.error('Permissions obtained')
raise VkAuthError(error_message)
def do_direct_authorization(self, session):
""" Direct Authorization, more info: https://vk.com/dev/auth_direct """
logger.info('Doing direct authorization, app_id=%s', self.app_id)
auth_data = {
'client_id': self.app_id,
'client_secret': self._client_secret,
'username': self._login,
'password': self._password,
'grant_type': 'password',
'2fa_supported': self._two_fa_supported,
'scope': self.scope,
'v': self.api_version
}
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
else:
if 'access_token' in response_json:
return response_json
if response_json['error'] == 'need_validation':
return self.direct_auth_require_2fa(session, auth_data)
elif response_json['error'] == 'need_captcha':
return self.direct_auth_require_captcha(session, response_json, auth_data)
else:
error_message = 'VK error: [{}] {}'.format(
response_json['error'], response_json['error_description'])
raise VkAuthError(error_message)
def direct_auth_require_2fa(self, session, auth_data):
if self._two_fa_force_sms:
auth_data['force_sms'] = self._two_fa_force_sms
session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
auth_data['code'] = self.get_2fa_code()
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def direct_auth_require_captcha(self, session, response, auth_data):
logger.info('Captcha is needed. Response: %s', response)
captcha_url = response['captcha_img']
logger.info('Captcha url %s', captcha_url)
auth_data['captcha_sid'] = response['captcha_sid']
auth_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = session.post(url=self.DIRECT_AUTHORIZE_URL,
data=stringify_values(auth_data))
try:
response_json = response.json()
except ValueError: # not JSON in response
error_message = 'OAuth2 grant access error'
logger.error(response.text)
raise VkAuthError(error_message)
return response_json
def require_2fa(self, html, http_session):
logger.info(
'User enabled 2 factors authentication. Auth check code is needed '
'(SMS, Google Authenticator or one-time password generated by vk)')
action_url = parse_form_action_url(html)
auth_check_code = self.get_2fa_code()
auth_check_data = {
'code': auth_check_code,
'_ajax': '1',
'remember': '1'
}
url = '/'.join([self.LOGIN_URL + action_url])
response = http_session.post(url=url, data=auth_check_data)
return response
def require_auth_captcha(self, response, query_params,
login_form_data, http_session):
"""Resolve auth captcha case
:param response: http response
:param query_params: dict: response query params, for example:
{'s': '0', 'email': 'my@email', 'dif': '1', 'role': 'fast', 'sid': '1'}
:param login_form_data: dict
:param http_session: requests.Session
:return: :raise VkAuthError:
"""
logger.info('Captcha is needed. Query params: %s', query_params)
form_text = response.text
action_url = parse_form_action_url(form_text)
logger.debug('form action url: %s', action_url)
if not action_url:
raise VkAuthError('Cannot find form action url')
captcha_sid, captcha_url = parse_captcha_html(
html=response.text, response_url=response.url)
logger.info('Captcha url %s', captcha_url)
login_form_data['captcha_sid'] = captcha_sid
login_form_data['captcha_key'] = self.get_captcha_key(captcha_url)
response = http_session.post(action_url, login_form_data)
return response
def require_phone_number(self, html, session):
logger.info(
'Auth requires phone number. You do login from unusual place')
# Raises VkPageWarningsError in case of warnings
# NOTE: we check only 'security_check' case on warnings for now
# in future it might be extended for other cases as well
check_html_warnings(html=html)
# Determine form action url
action_url = parse_form_action_url(html)
# Get masked phone from html to make things more clear
phone_prefix, phone_suffix = parse_masked_phone_number(html)
if self._phone_number:
code = self._phone_number[len(phone_prefix):-len(phone_suffix)]
else:
if self.interactive:
prompt = 'Enter missing digits of your phone number (%s****%s): '\
% (phone_prefix, phone_suffix)
code = raw_input(prompt)
else:
raise VkAuthError(
'Phone number is required. Create an API instance using '
'phone_number parameter or use interactive mode')
params = parse_url_query_params(action_url, fragment=False)
auth_data = {
'code': code,
'act': 'security_check',
'hash': params['hash']}
response = session.post(
url=self.LOGIN_URL + action_url, data=auth_data)
logger.debug('require_phone_number resp: %s', response.text)
def get_2fa_code(self):
if self.interactive:
auth_check_code = raw_input('Auth check code: ')
return auth_check_code.strip()
else:
raise VkAuthError(
'Auth check code is needed (SMS, Google Authenticator or '
'one-time password). '
'Use interactive mode to enter the code manually')
@property
def access_token(self):
if self._access_token is None:
self._access_token = self._get_access_token()
return self._access_token
def _get_access_token(self):
"""Get access token using app_id, login and password OR service token
(service token docs: https://vk.com/dev/service_token
"""
if self._service_token:
logger.info('Use service token: %s',
5 * '*' + self._service_token[50:])
return self._service_token
if not all([self.app_id, self._login, self._password]):
raise ValueError(
'app_id=%s, login=%s password=%s (masked) must be given'
% (self.app_id, self._login,
'*' * len(self._password) if self._password else 'None'))
logger.info("Getting access token for user '%s'" % self._login)
with self.http_session as s:
if self._client_secret:
url_query_params = self.do_direct_authorization(session=s)
else:
self.do_login(http_session=s)
url_query_params = self.do_implicit_flow_authorization(session=s)
logger.debug('url_query_params: %s', url_query_params)
if 'access_token' in url_query_params:
logger.info('Access token has been gotten')
return url_query_params['access_token']
else:
raise VkAuthError('OAuth2 authorization error. Url params: %s'
% url_query_params)
def get_captcha_key(self, captcha_image_url):
"""Read CAPTCHA key from user input"""
if self.interactive:
print('Open CAPTCHA image url in your browser and enter it below: ',
captcha_image_url)
captcha_key = raw_input('Enter CAPTCHA key: ')
return captcha_key
else:
raise VkAuthError(
'Captcha is required. Use interactive mode to enter it '
'manually')
def renew_access_token(self):
"""Force to get new access token
"""
self._access_token = self._get_access_token()
def make_request(self, request, captcha_response=None):
"""Make api request helper function
:param request: vk_requests.api.Request instance
:param captcha_response: None or dict, e.g {'sid': <sid>, 'key': <key>}
:return: dict: json decoded http response
"""
logger.debug('Prepare API Method request %r', request)
response = self._send_api_request(request=request,
captcha_response=captcha_response)
response.raise_for_status()
response_or_error = json.loads(response.text)
logger.debug('response: %s', response_or_error)
if 'error' in response_or_error:
error_data = response_or_error['error']
vk_error = VkAPIError(error_data)
if vk_error.is_captcha_needed():
captcha_key = self.get_captcha_key(vk_error.captcha_img_url)
if not captcha_key:
raise vk_error
# Retry http request with captcha info attached
captcha_response = {
'sid': vk_error.captcha_sid,
'key': captcha_key,
}
return self.make_request(
request, captcha_response=captcha_response)
elif vk_error.is_access_token_incorrect():
logger.info(
'Authorization failed. Access token will be dropped')
self._access_token = None
return self.make_request(request)
else:
raise vk_error
elif 'execute_errors' in response_or_error:
# can take place while running .execute vk method
# See more: https://vk.com/dev/execute
raise VkAPIError(response_or_error['execute_errors'][0])
elif 'response' in response_or_error:
return response_or_error['response']
def __repr__(self): # pragma: no cover
return "%s(api_url='%s', access_token='%s')" % (
self.__class__.__name__, self.API_URL, self._access_token)
|
prawn-cake/vk-requests | vk_requests/__init__.py | create_api | python | def create_api(app_id=None, login=None, password=None, phone_number=None,
scope='offline', api_version='5.92', http_params=None,
interactive=False, service_token=None, client_secret=None,
two_fa_supported=False, two_fa_force_sms=False):
session = VKSession(app_id=app_id,
user_login=login,
user_password=password,
phone_number=phone_number,
scope=scope,
service_token=service_token,
api_version=api_version,
interactive=interactive,
client_secret=client_secret,
two_fa_supported = two_fa_supported,
two_fa_force_sms=two_fa_force_sms)
return API(session=session, http_params=http_params) | Factory method to explicitly create API with app_id, login, password
and phone_number parameters.
If the app_id, login, password are not passed, then token-free session
will be created automatically
:param app_id: int: vk application id, more info: https://vk.com/dev/main
:param login: str: vk login
:param password: str: vk password
:param phone_number: str: phone number with country code (+71234568990)
:param scope: str or list of str: vk session scope
:param api_version: str: vk api version, check https://vk.com/dev/versions
:param interactive: bool: flag which indicates to use InteractiveVKSession
:param service_token: str: new way of querying vk api, instead of getting
oauth token
:param http_params: dict: requests http parameters passed along
:param client_secret: str: secure application key for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_supported: bool: enable two-factor authentication for Direct Authorization,
more info: https://vk.com/dev/auth_direct
:param two_fa_force_sms: bool: force SMS two-factor authentication for Direct Authorization
if two_fa_supported is True, more info: https://vk.com/dev/auth_direct
:return: api instance
:rtype : vk_requests.api.API | train | https://github.com/prawn-cake/vk-requests/blob/dde01c1ed06f13de912506163a35d8c7e06a8f62/vk_requests/__init__.py#L21-L61 | null | # -*- coding: utf-8 -*-
import sys
from vk_requests.session import VKSession
from vk_requests.api import API
__version__ = '1.2.0'
PY_VERSION = sys.version_info.major, sys.version_info.minor
if PY_VERSION < (3, 4):
import warnings
warnings.simplefilter('default')
warnings.warn('Support of all python version less than 3.4 will be stopped '
'in 2.0.0, please migrate your code to python 3.4+',
DeprecationWarning)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('vk-requests').addHandler(NullHandler())
|
samuelcolvin/arq | arq/cli.py | cli | python | def cli(*, worker_settings, burst, check, watch, verbose):
sys.path.append(os.getcwd())
worker_settings = import_string(worker_settings)
logging.config.dictConfig(default_log_config(verbose))
if check:
exit(check_health(worker_settings))
else:
kwargs = {} if burst is None else {'burst': burst}
if watch:
loop = asyncio.get_event_loop()
loop.run_until_complete(watch_reload(watch, worker_settings, loop))
else:
run_worker(worker_settings, **kwargs) | Job queues in python with asyncio and redis.
CLI to run the arq worker. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cli.py#L27-L45 | [
"def check_health(settings_cls) -> int:\n \"\"\"\n Run a health check on the worker and return the appropriate exit code.\n :return: 0 if successful, 1 if not\n \"\"\"\n cls_kwargs = get_kwargs(settings_cls)\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(async_check_health(cls_kwargs.get('redis_settings')))\n",
"def run_worker(settings_cls, **kwargs) -> Worker:\n worker = create_worker(settings_cls, **kwargs)\n worker.run()\n return worker\n",
"def default_log_config(verbose: bool) -> dict:\n \"\"\"\n Setup default config. for dictConfig.\n\n :param verbose: level: DEBUG if True, INFO if False\n :return: dict suitable for ``logging.config.dictConfig``\n \"\"\"\n log_level = 'DEBUG' if verbose else 'INFO'\n return {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'arq.standard': {'level': log_level, 'class': 'logging.StreamHandler', 'formatter': 'arq.standard'}\n },\n 'formatters': {'arq.standard': {'format': '%(asctime)s: %(message)s', 'datefmt': '%H:%M:%S'}},\n 'loggers': {'arq': {'handlers': ['arq.standard'], 'level': log_level}},\n }\n",
"async def watch_reload(path, worker_settings, loop):\n try:\n from watchgod import awatch\n except ImportError as e: # pragma: no cover\n raise ImportError('watchgod not installed, use `pip install watchgod`') from e\n\n stop_event = asyncio.Event()\n worker = create_worker(worker_settings)\n try:\n worker.on_stop = lambda s: s != Signals.SIGUSR1 and stop_event.set()\n loop.create_task(worker.async_run())\n async for _ in awatch(path, stop_event=stop_event):\n print('\\nfiles changed, reloading arq worker...')\n worker.handle_sig(Signals.SIGUSR1)\n await worker.close()\n loop.create_task(worker.async_run())\n finally:\n await worker.close()\n"
] | import asyncio
import logging.config
import os
import sys
from signal import Signals
import click
from pydantic.utils import import_string
from .logs import default_log_config
from .version import VERSION
from .worker import check_health, create_worker, run_worker
burst_help = 'Batch mode: exit once no jobs are found in any queue.'
health_check_help = 'Health Check: run a health check and exit.'
watch_help = 'Watch a directory and reload the worker upon changes.'
verbose_help = 'Enable verbose output.'
@click.command('arq')
@click.version_option(VERSION, '-V', '--version', prog_name='arq')
@click.argument('worker-settings', type=str, required=True)
@click.option('--burst/--no-burst', default=None, help=burst_help)
@click.option('--check', is_flag=True, help=health_check_help)
@click.option('--watch', type=click.Path(exists=True, dir_okay=True, file_okay=False), help=watch_help)
@click.option('-v', '--verbose', is_flag=True, help=verbose_help)
async def watch_reload(path, worker_settings, loop):
try:
from watchgod import awatch
except ImportError as e: # pragma: no cover
raise ImportError('watchgod not installed, use `pip install watchgod`') from e
stop_event = asyncio.Event()
worker = create_worker(worker_settings)
try:
worker.on_stop = lambda s: s != Signals.SIGUSR1 and stop_event.set()
loop.create_task(worker.async_run())
async for _ in awatch(path, stop_event=stop_event):
print('\nfiles changed, reloading arq worker...')
worker.handle_sig(Signals.SIGUSR1)
await worker.close()
loop.create_task(worker.async_run())
finally:
await worker.close()
|
samuelcolvin/arq | arq/cron.py | next_cron | python | def next_cron(
previous_dt: datetime,
*,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
):
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower())
options = dict(
month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond
)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt
dt = next_dt | Find the next datetime matching the given parameters. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cron.py#L65-L91 | [
"def _get_next_dt(dt_, options): # noqa: C901\n for field in dt_fields:\n v = options[field]\n if v is None:\n continue\n if field == D.weekday:\n next_v = dt_.weekday()\n else:\n next_v = getattr(dt_, field)\n if isinstance(v, int):\n mismatch = next_v != v\n else:\n assert isinstance(v, (set, list, tuple))\n mismatch = next_v not in v\n # print(field, v, next_v, mismatch)\n if mismatch:\n micro = max(dt_.microsecond - options[D.microsecond], 0)\n if field == D.month:\n if dt_.month == 12:\n return datetime(dt_.year + 1, 1, 1)\n else:\n return datetime(dt_.year, dt_.month + 1, 1)\n elif field in (D.day, D.weekday):\n return (\n dt_\n + timedelta(days=1)\n - timedelta(hours=dt_.hour, minutes=dt_.minute, seconds=dt_.second, microseconds=micro)\n )\n elif field == D.hour:\n return dt_ + timedelta(hours=1) - timedelta(minutes=dt_.minute, seconds=dt_.second, microseconds=micro)\n elif field == D.minute:\n return dt_ + timedelta(minutes=1) - timedelta(seconds=dt_.second, microseconds=micro)\n elif field == D.second:\n return dt_ + timedelta(seconds=1) - timedelta(microseconds=micro)\n else:\n assert field == D.microsecond, field\n return dt_ + timedelta(microseconds=options['microsecond'] - dt_.microsecond)\n"
] | import asyncio
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Callable, Optional, Union
from pydantic.utils import import_string
from arq.utils import SecondsTimedelta, to_seconds
class D(str, Enum):
month = 'month'
day = 'day'
weekday = 'weekday'
hour = 'hour'
minute = 'minute'
second = 'second'
microsecond = 'microsecond'
dt_fields = D.month, D.day, D.weekday, D.hour, D.minute, D.second, D.microsecond
weekdays = 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat', 'sun'
def _get_next_dt(dt_, options): # noqa: C901
for field in dt_fields:
v = options[field]
if v is None:
continue
if field == D.weekday:
next_v = dt_.weekday()
else:
next_v = getattr(dt_, field)
if isinstance(v, int):
mismatch = next_v != v
else:
assert isinstance(v, (set, list, tuple))
mismatch = next_v not in v
# print(field, v, next_v, mismatch)
if mismatch:
micro = max(dt_.microsecond - options[D.microsecond], 0)
if field == D.month:
if dt_.month == 12:
return datetime(dt_.year + 1, 1, 1)
else:
return datetime(dt_.year, dt_.month + 1, 1)
elif field in (D.day, D.weekday):
return (
dt_
+ timedelta(days=1)
- timedelta(hours=dt_.hour, minutes=dt_.minute, seconds=dt_.second, microseconds=micro)
)
elif field == D.hour:
return dt_ + timedelta(hours=1) - timedelta(minutes=dt_.minute, seconds=dt_.second, microseconds=micro)
elif field == D.minute:
return dt_ + timedelta(minutes=1) - timedelta(seconds=dt_.second, microseconds=micro)
elif field == D.second:
return dt_ + timedelta(seconds=1) - timedelta(microseconds=micro)
else:
assert field == D.microsecond, field
return dt_ + timedelta(microseconds=options['microsecond'] - dt_.microsecond)
@dataclass
class CronJob:
name: str
coroutine: Callable
month: Union[None, set, int]
day: Union[None, set, int]
weekday: Union[None, set, int, str]
hour: Union[None, set, int]
minute: Union[None, set, int]
second: Union[None, set, int]
microsecond: int
run_at_startup: bool
unique: bool
timeout_s: Optional[float]
keep_result_s: Optional[float]
max_tries: Optional[int]
next_run: datetime = None
def set_next(self, dt: datetime):
self.next_run = next_cron(
dt,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond,
)
def __repr__(self):
return '<CronJob {}>'.format(' '.join(f'{k}={v}' for k, v in self.__dict__.items()))
def cron(
coroutine: Union[str, Callable],
*,
name: Optional[str] = None,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
run_at_startup: bool = False,
unique: bool = True,
timeout: Optional[SecondsTimedelta] = None,
keep_result: Optional[float] = 0,
max_tries: Optional[int] = 1,
) -> CronJob:
"""
Create a cron job, eg. it should be executed at specific times.
Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the
job will only be run once even if multiple workers are running.
:param coroutine: coroutine function to run
:param name: name of the job, if None, the name of the coroutine is used
:param month: month(s) to run the job on, 1 - 12
:param day: day(s) to run the job on, 1 - 31
:param weekday: week day(s) to run the job on, 0 - 6 or mon - sun
:param hour: hour(s) to run the job on, 0 - 23
:param minute: minute(s) to run the job on, 0 - 59
:param second: second(s) to run the job on, 0 - 59
:param microsecond: microsecond(s) to run the job on,
defaults to 123456 as the world is busier at the top of a second, 0 - 1e6
:param run_at_startup: whether to run as worker starts
:param unique: whether the job should be only be executed once at each time
:param timeout: job timeout
:param keep_result: how long to keep the result for
:param max_tries: maximum number of tries for the job
"""
if isinstance(coroutine, str):
name = name or 'cron:' + coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return CronJob(
name or 'cron:' + coroutine.__qualname__,
coroutine,
month,
day,
weekday,
hour,
minute,
second,
microsecond,
run_at_startup,
unique,
timeout,
keep_result,
max_tries,
)
|
samuelcolvin/arq | arq/cron.py | cron | python | def cron(
coroutine: Union[str, Callable],
*,
name: Optional[str] = None,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
run_at_startup: bool = False,
unique: bool = True,
timeout: Optional[SecondsTimedelta] = None,
keep_result: Optional[float] = 0,
max_tries: Optional[int] = 1,
) -> CronJob:
if isinstance(coroutine, str):
name = name or 'cron:' + coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return CronJob(
name or 'cron:' + coroutine.__qualname__,
coroutine,
month,
day,
weekday,
hour,
minute,
second,
microsecond,
run_at_startup,
unique,
timeout,
keep_result,
max_tries,
) | Create a cron job, eg. it should be executed at specific times.
Workers will enqueue this job at or just after the set times. If ``unique`` is true (the default) the
job will only be run once even if multiple workers are running.
:param coroutine: coroutine function to run
:param name: name of the job, if None, the name of the coroutine is used
:param month: month(s) to run the job on, 1 - 12
:param day: day(s) to run the job on, 1 - 31
:param weekday: week day(s) to run the job on, 0 - 6 or mon - sun
:param hour: hour(s) to run the job on, 0 - 23
:param minute: minute(s) to run the job on, 0 - 59
:param second: second(s) to run the job on, 0 - 59
:param microsecond: microsecond(s) to run the job on,
defaults to 123456 as the world is busier at the top of a second, 0 - 1e6
:param run_at_startup: whether to run as worker starts
:param unique: whether the job should be only be executed once at each time
:param timeout: job timeout
:param keep_result: how long to keep the result for
:param max_tries: maximum number of tries for the job | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/cron.py#L128-L191 | [
"def to_seconds(td: Optional[SecondsTimedelta]) -> Optional[float]:\n if td is None:\n return td\n elif isinstance(td, timedelta):\n return td.total_seconds()\n return td\n"
] | import asyncio
from dataclasses import dataclass
from datetime import datetime, timedelta
from enum import Enum
from typing import Callable, Optional, Union
from pydantic.utils import import_string
from arq.utils import SecondsTimedelta, to_seconds
class D(str, Enum):
month = 'month'
day = 'day'
weekday = 'weekday'
hour = 'hour'
minute = 'minute'
second = 'second'
microsecond = 'microsecond'
dt_fields = D.month, D.day, D.weekday, D.hour, D.minute, D.second, D.microsecond
weekdays = 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat', 'sun'
def _get_next_dt(dt_, options): # noqa: C901
for field in dt_fields:
v = options[field]
if v is None:
continue
if field == D.weekday:
next_v = dt_.weekday()
else:
next_v = getattr(dt_, field)
if isinstance(v, int):
mismatch = next_v != v
else:
assert isinstance(v, (set, list, tuple))
mismatch = next_v not in v
# print(field, v, next_v, mismatch)
if mismatch:
micro = max(dt_.microsecond - options[D.microsecond], 0)
if field == D.month:
if dt_.month == 12:
return datetime(dt_.year + 1, 1, 1)
else:
return datetime(dt_.year, dt_.month + 1, 1)
elif field in (D.day, D.weekday):
return (
dt_
+ timedelta(days=1)
- timedelta(hours=dt_.hour, minutes=dt_.minute, seconds=dt_.second, microseconds=micro)
)
elif field == D.hour:
return dt_ + timedelta(hours=1) - timedelta(minutes=dt_.minute, seconds=dt_.second, microseconds=micro)
elif field == D.minute:
return dt_ + timedelta(minutes=1) - timedelta(seconds=dt_.second, microseconds=micro)
elif field == D.second:
return dt_ + timedelta(seconds=1) - timedelta(microseconds=micro)
else:
assert field == D.microsecond, field
return dt_ + timedelta(microseconds=options['microsecond'] - dt_.microsecond)
def next_cron(
previous_dt: datetime,
*,
month: Union[None, set, int] = None,
day: Union[None, set, int] = None,
weekday: Union[None, set, int, str] = None,
hour: Union[None, set, int] = None,
minute: Union[None, set, int] = None,
second: Union[None, set, int] = 0,
microsecond: int = 123_456,
):
"""
Find the next datetime matching the given parameters.
"""
dt = previous_dt + timedelta(seconds=1)
if isinstance(weekday, str):
weekday = weekdays.index(weekday.lower())
options = dict(
month=month, day=day, weekday=weekday, hour=hour, minute=minute, second=second, microsecond=microsecond
)
while True:
next_dt = _get_next_dt(dt, options)
# print(dt, next_dt)
if next_dt is None:
return dt
dt = next_dt
@dataclass
class CronJob:
name: str
coroutine: Callable
month: Union[None, set, int]
day: Union[None, set, int]
weekday: Union[None, set, int, str]
hour: Union[None, set, int]
minute: Union[None, set, int]
second: Union[None, set, int]
microsecond: int
run_at_startup: bool
unique: bool
timeout_s: Optional[float]
keep_result_s: Optional[float]
max_tries: Optional[int]
next_run: datetime = None
def set_next(self, dt: datetime):
self.next_run = next_cron(
dt,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond,
)
def __repr__(self):
return '<CronJob {}>'.format(' '.join(f'{k}={v}' for k, v in self.__dict__.items()))
|
samuelcolvin/arq | arq/utils.py | to_unix_ms | python | def to_unix_ms(dt: datetime) -> int:
utcoffset = dt.utcoffset()
ep = epoch if utcoffset is None else epoch_tz
return as_int((dt - ep).total_seconds() * 1000) | convert a datetime to number of milliseconds since 1970 and calculate timezone offset | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/utils.py#L23-L29 | [
"def as_int(f: float) -> int:\n return int(round(f))\n"
] | import asyncio
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Optional, Union
logger = logging.getLogger('arq.utils')
epoch = datetime(1970, 1, 1)
epoch_tz = epoch.replace(tzinfo=timezone.utc)
SecondsTimedelta = Union[int, float, timedelta]
def as_int(f: float) -> int:
return int(round(f))
def timestamp_ms() -> int:
return as_int(time() * 1000)
def ms_to_datetime(unix_ms: int) -> datetime:
return epoch + timedelta(seconds=unix_ms / 1000)
def to_ms(td: Optional[SecondsTimedelta]) -> Optional[int]:
if td is None:
return td
elif isinstance(td, timedelta):
td = td.total_seconds()
return as_int(td * 1000)
def to_seconds(td: Optional[SecondsTimedelta]) -> Optional[float]:
if td is None:
return td
elif isinstance(td, timedelta):
return td.total_seconds()
return td
async def poll(step: float = 0.5):
loop = asyncio.get_event_loop()
start = loop.time()
while True:
before = loop.time()
yield before - start
after = loop.time()
wait = max([0, step - after + before])
await asyncio.sleep(wait)
DEFAULT_CURTAIL = 80
def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str:
"""
Truncate a string and add an ellipsis (three dots) to the end if it was too long
:param s: string to possibly truncate
:param length: length to truncate the string to
"""
if len(s) > length:
s = s[: length - 1] + '…'
return s
def args_to_string(args, kwargs):
arguments = ''
if args:
arguments = ', '.join(map(repr, args))
if kwargs:
if arguments:
arguments += ', '
arguments += ', '.join(f'{k}={v!r}' for k, v in sorted(kwargs.items()))
return truncate(arguments)
|
samuelcolvin/arq | arq/utils.py | truncate | python | def truncate(s: str, length: int = DEFAULT_CURTAIL) -> str:
if len(s) > length:
s = s[: length - 1] + '…'
return s | Truncate a string and add an ellipsis (three dots) to the end if it was too long
:param s: string to possibly truncate
:param length: length to truncate the string to | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/utils.py#L66-L75 | null | import asyncio
import logging
from datetime import datetime, timedelta, timezone
from time import time
from typing import Optional, Union
logger = logging.getLogger('arq.utils')
epoch = datetime(1970, 1, 1)
epoch_tz = epoch.replace(tzinfo=timezone.utc)
SecondsTimedelta = Union[int, float, timedelta]
def as_int(f: float) -> int:
return int(round(f))
def timestamp_ms() -> int:
return as_int(time() * 1000)
def to_unix_ms(dt: datetime) -> int:
"""
convert a datetime to number of milliseconds since 1970 and calculate timezone offset
"""
utcoffset = dt.utcoffset()
ep = epoch if utcoffset is None else epoch_tz
return as_int((dt - ep).total_seconds() * 1000)
def ms_to_datetime(unix_ms: int) -> datetime:
return epoch + timedelta(seconds=unix_ms / 1000)
def to_ms(td: Optional[SecondsTimedelta]) -> Optional[int]:
if td is None:
return td
elif isinstance(td, timedelta):
td = td.total_seconds()
return as_int(td * 1000)
def to_seconds(td: Optional[SecondsTimedelta]) -> Optional[float]:
if td is None:
return td
elif isinstance(td, timedelta):
return td.total_seconds()
return td
async def poll(step: float = 0.5):
loop = asyncio.get_event_loop()
start = loop.time()
while True:
before = loop.time()
yield before - start
after = loop.time()
wait = max([0, step - after + before])
await asyncio.sleep(wait)
DEFAULT_CURTAIL = 80
def args_to_string(args, kwargs):
arguments = ''
if args:
arguments = ', '.join(map(repr, args))
if kwargs:
if arguments:
arguments += ', '
arguments += ', '.join(f'{k}={v!r}' for k, v in sorted(kwargs.items()))
return truncate(arguments)
|
samuelcolvin/arq | arq/connections.py | create_pool | python | async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:
settings = settings or RedisSettings()
addr = settings.host, settings.port
try:
pool = await aioredis.create_redis_pool(
addr,
db=settings.database,
password=settings.password,
timeout=settings.conn_timeout,
encoding='utf8',
commands_factory=ArqRedis,
)
except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:
if _retry < settings.conn_retries:
logger.warning(
'redis connection error %s:%s %s %s, %d retries remaining...',
settings.host,
settings.port,
e.__class__.__name__,
e,
settings.conn_retries - _retry,
)
await asyncio.sleep(settings.conn_retry_delay)
else:
raise
else:
if _retry > 0:
logger.info('redis connection successful')
return pool
# recursively attempt to create the pool outside the except block to avoid
# "During handling of the above exception..." madness
return await create_pool(settings, _retry=_retry + 1) | Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.
Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,
thus allowing job enqueuing. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L125-L163 | [
"async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:\n \"\"\"\n Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.\n\n Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,\n thus allowing job enqueuing.\n \"\"\"\n settings = settings or RedisSettings()\n addr = settings.host, settings.port\n try:\n pool = await aioredis.create_redis_pool(\n addr,\n db=settings.database,\n password=settings.password,\n timeout=settings.conn_timeout,\n encoding='utf8',\n commands_factory=ArqRedis,\n )\n except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:\n if _retry < settings.conn_retries:\n logger.warning(\n 'redis connection error %s:%s %s %s, %d retries remaining...',\n settings.host,\n settings.port,\n e.__class__.__name__,\n e,\n settings.conn_retries - _retry,\n )\n await asyncio.sleep(settings.conn_retry_delay)\n else:\n raise\n else:\n if _retry > 0:\n logger.info('redis connection successful')\n return pool\n\n # recursively attempt to create the pool outside the except block to avoid\n # \"During handling of the above exception...\" madness\n return await create_pool(settings, _retry=_retry + 1)\n"
] | import asyncio
import logging
from dataclasses import dataclass
from datetime import datetime, timedelta
from operator import attrgetter
from typing import Any, List, Optional, Union
from uuid import uuid4
import aioredis
from aioredis import MultiExecError, Redis
from .constants import job_key_prefix, queue_name, result_key_prefix
from .jobs import Job, JobResult, pickle_job
from .utils import timestamp_ms, to_ms, to_unix_ms
logger = logging.getLogger('arq.connections')
@dataclass
class RedisSettings:
"""
No-Op class used to hold redis connection redis_settings.
Used by :func:`arq.connections.create_pool` and :class:`arq.worker.Worker`.
"""
host: str = 'localhost'
port: int = 6379
database: int = 0
password: str = None
conn_timeout: int = 1
conn_retries: int = 5
conn_retry_delay: int = 1
def __repr__(self):
return '<RedisSettings {}>'.format(' '.join(f'{k}={v}' for k, v in self.__dict__.items()))
# extra time after the job is expected to start when the job key should expire, 1 day in ms
expires_extra_ms = 86_400_000
class ArqRedis(Redis):
"""
Thin subclass of ``aioredis.Redis`` which adds :func:`arq.connections.enqueue_job`.
"""
async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
"""
Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
"""
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self)
async def _get_job_result(self, key):
job_id = key[len(result_key_prefix) :]
job = Job(job_id, self)
r = await job.result_info()
r.job_id = job_id
return r
async def all_job_results(self) -> List[JobResult]:
"""
Get results for all jobs in redis.
"""
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time'))
async def log_redis_info(redis, log_func):
with await redis as r:
info, key_count = await asyncio.gather(r.info(), r.dbsize())
log_func(
f'redis_version={info["server"]["redis_version"]} '
f'mem_usage={info["memory"]["used_memory_human"]} '
f'clients_connected={info["clients"]["connected_clients"]} '
f'db_keys={key_count}'
)
|
samuelcolvin/arq | arq/connections.py | ArqRedis.enqueue_job | python | async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self) | Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L48-L107 | [
"def to_ms(td: Optional[SecondsTimedelta]) -> Optional[int]:\n if td is None:\n return td\n elif isinstance(td, timedelta):\n td = td.total_seconds()\n return as_int(td * 1000)\n"
] | class ArqRedis(Redis):
"""
Thin subclass of ``aioredis.Redis`` which adds :func:`arq.connections.enqueue_job`.
"""
async def _get_job_result(self, key):
job_id = key[len(result_key_prefix) :]
job = Job(job_id, self)
r = await job.result_info()
r.job_id = job_id
return r
async def all_job_results(self) -> List[JobResult]:
"""
Get results for all jobs in redis.
"""
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time'))
|
samuelcolvin/arq | arq/connections.py | ArqRedis.all_job_results | python | async def all_job_results(self) -> List[JobResult]:
keys = await self.keys(result_key_prefix + '*')
results = await asyncio.gather(*[self._get_job_result(k) for k in keys])
return sorted(results, key=attrgetter('enqueue_time')) | Get results for all jobs in redis. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/connections.py#L116-L122 | null | class ArqRedis(Redis):
"""
Thin subclass of ``aioredis.Redis`` which adds :func:`arq.connections.enqueue_job`.
"""
async def enqueue_job(
self,
function: str,
*args: Any,
_job_id: Optional[str] = None,
_defer_until: Optional[datetime] = None,
_defer_by: Union[None, int, float, timedelta] = None,
_expires: Union[None, int, float, timedelta] = None,
_job_try: Optional[int] = None,
**kwargs: Any,
) -> Optional[Job]:
"""
Enqueue a job.
:param function: Name of the function to call
:param args: args to pass to the function
:param _job_id: ID of the job, can be used to enforce job uniqueness
:param _defer_until: datetime at which to run the job
:param _defer_by: duration to wait before running the job
:param _expires: if the job still hasn't started after this duration, do not run it
:param _job_try: useful when re-enqueueing jobs within a job
:param kwargs: any keyword arguments to pass to the function
:return: :class:`arq.jobs.Job` instance or ``None`` if a job with this ID already exists
"""
job_id = _job_id or uuid4().hex
job_key = job_key_prefix + job_id
assert not (_defer_until and _defer_by), "use either 'defer_until' or 'defer_by' or neither, not both"
defer_by_ms = to_ms(_defer_by)
expires_ms = to_ms(_expires)
with await self as conn:
pipe = conn.pipeline()
pipe.unwatch()
pipe.watch(job_key)
job_exists = pipe.exists(job_key)
await pipe.execute()
if await job_exists:
return
enqueue_time_ms = timestamp_ms()
if _defer_until is not None:
score = to_unix_ms(_defer_until)
elif defer_by_ms:
score = enqueue_time_ms + defer_by_ms
else:
score = enqueue_time_ms
expires_ms = expires_ms or score - enqueue_time_ms + expires_extra_ms
job = pickle_job(function, args, kwargs, _job_try, enqueue_time_ms)
tr = conn.multi_exec()
tr.psetex(job_key, expires_ms, job)
tr.zadd(queue_name, score, job_id)
try:
await tr.execute()
except MultiExecError:
# job got enqueued since we checked 'job_exists'
return
return Job(job_id, self)
async def _get_job_result(self, key):
job_id = key[len(result_key_prefix) :]
job = Job(job_id, self)
r = await job.result_info()
r.job_id = job_id
return r
|
samuelcolvin/arq | docs/examples/job_ids.py | main | python | async def main():
redis = await create_pool(RedisSettings())
# no id, random id will be generated
job1 = await redis.enqueue_job('the_task')
print(job1)
# random id again, again the job will be enqueued and a job will be returned
job2 = await redis.enqueue_job('the_task')
print(job2)
"""
> <arq job 7d2163c056e54b62a4d8404921094f05>
"""
# custom job id, job will be enqueued
job3 = await redis.enqueue_job('the_task', _job_id='foobar')
print(job3)
"""
> <arq job foobar>
"""
# same custom job id, job will not be enqueued and enqueue_job will return None
job4 = await redis.enqueue_job('the_task', _job_id='foobar')
print(job4)
"""
> None
""" | > <arq job 99edfef86ccf4145b2f64ee160fa3297> | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/docs/examples/job_ids.py#L9-L38 | [
"async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:\n \"\"\"\n Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.\n\n Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,\n thus allowing job enqueuing.\n \"\"\"\n settings = settings or RedisSettings()\n addr = settings.host, settings.port\n try:\n pool = await aioredis.create_redis_pool(\n addr,\n db=settings.database,\n password=settings.password,\n timeout=settings.conn_timeout,\n encoding='utf8',\n commands_factory=ArqRedis,\n )\n except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:\n if _retry < settings.conn_retries:\n logger.warning(\n 'redis connection error %s:%s %s %s, %d retries remaining...',\n settings.host,\n settings.port,\n e.__class__.__name__,\n e,\n settings.conn_retries - _retry,\n )\n await asyncio.sleep(settings.conn_retry_delay)\n else:\n raise\n else:\n if _retry > 0:\n logger.info('redis connection successful')\n return pool\n\n # recursively attempt to create the pool outside the except block to avoid\n # \"During handling of the above exception...\" madness\n return await create_pool(settings, _retry=_retry + 1)\n"
] | import asyncio
from arq import create_pool
from arq.connections import RedisSettings
async def the_task(ctx):
print('running the task with id', ctx['job_id'])
class WorkerSettings:
functions = [the_task]
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
samuelcolvin/arq | arq/worker.py | func | python | def func(
coroutine: Union[str, Function, Callable],
*,
name: Optional[str] = None,
keep_result: Optional[SecondsTimedelta] = None,
timeout: Optional[SecondsTimedelta] = None,
max_tries: Optional[int] = None,
) -> Function:
if isinstance(coroutine, Function):
return coroutine
if isinstance(coroutine, str):
name = name or coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries) | Wrapper for a job function which lets you configure more settings.
:param coroutine: coroutine function to call, can be a string to import
:param name: name for function, if None, ``coroutine.__qualname__`` is used
:param keep_result: duration to keep the result for, if 0 the result is not kept
:param timeout: maximum time the job should take
:param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L53-L81 | null | import asyncio
import inspect
import logging
import signal
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from signal import Signals
from time import time
from typing import Awaitable, Callable, Dict, List, Optional, Sequence, Union
import async_timeout
from aioredis import MultiExecError
from pydantic.utils import import_string
from arq.cron import CronJob
from arq.jobs import pickle_result, unpickle_job_raw
from .connections import ArqRedis, RedisSettings, create_pool, log_redis_info
from .constants import (
health_check_key,
in_progress_key_prefix,
job_key_prefix,
queue_name,
result_key_prefix,
retry_key_prefix,
)
from .utils import (
SecondsTimedelta,
args_to_string,
ms_to_datetime,
poll,
timestamp_ms,
to_ms,
to_seconds,
to_unix_ms,
truncate,
)
logger = logging.getLogger('arq.worker')
no_result = object()
@dataclass
class Function:
name: str
coroutine: Callable
timeout_s: Optional[float]
keep_result_s: Optional[float]
max_tries: Optional[int]
class Retry(RuntimeError):
"""
Special exception to retry the job (if ``max_retries`` hasn't been reached).
:param defer: duration to wait before rerunning the job
"""
__slots__ = ('defer_score',)
def __init__(self, defer: Optional[SecondsTimedelta] = None):
self.defer_score = to_ms(defer)
def __repr__(self):
return f'<Retry defer {(self.defer_score or 0) / 1000:0.2f}s>'
def __str__(self):
return repr(self)
class FailedJobs(RuntimeError):
def __init__(self, count, job_results):
self.count = count
self.job_results = job_results
def __str__(self):
if self.count == 1 and self.job_results:
exc = self.job_results[0].result
return f'1 job failed "{exc.__class__.__name__}: {exc}"'
else:
return f'{self.count} jobs failed'
def __repr__(self):
return f'<{str(self)}>'
class Worker:
"""
Main class for running jobs.
:param functions: list of functions to register, can either be raw coroutine functions or the
result of :func:`arq.worker.func`.
:param cron_jobs: list of cron jobs to run, use :func:`arq.cron.cron` to create them
:param redis_settings: settings for creating a redis connection
:param redis_pool: existing redis pool, generally None
:param burst: whether to stop the worker once all jobs have been run
:param on_startup: coroutine function to run at startup
:param on_shutdown: coroutine function to run at shutdown
:param max_jobs: maximum number of jobs to run at a time
:param job_timeout: default job timeout (max run time)
:param keep_result: default duration to keep job results for
:param poll_delay: duration between polling the queue for new jobs
:param max_tries: default maximum number of times to retry a job
:param health_check_interval: how often to set the health check key
"""
def __init__(
self,
functions: Sequence[Function] = (),
*,
cron_jobs: Optional[Sequence[CronJob]] = None,
redis_settings: RedisSettings = None,
redis_pool: ArqRedis = None,
burst: bool = False,
on_startup: Callable[[Dict], Awaitable] = None,
on_shutdown: Callable[[Dict], Awaitable] = None,
max_jobs: int = 10,
job_timeout: SecondsTimedelta = 300,
keep_result: SecondsTimedelta = 3600,
poll_delay: SecondsTimedelta = 0.5,
max_tries: int = 5,
health_check_interval: SecondsTimedelta = 3600,
ctx: Optional[Dict] = None,
):
self.functions: Dict[str, Union[Function, CronJob]] = {f.name: f for f in map(func, functions)}
self.cron_jobs: List[CronJob] = []
if cron_jobs:
assert all(isinstance(cj, CronJob) for cj in cron_jobs), 'cron_jobs, must be instances of CronJob'
self.cron_jobs = cron_jobs
self.functions.update({cj.name: cj for cj in self.cron_jobs})
assert len(self.functions) > 0, 'at least one function or cron_job must be registered'
self.burst = burst
self.on_startup = on_startup
self.on_shutdown = on_shutdown
self.sem = asyncio.BoundedSemaphore(max_jobs)
self.job_timeout_s = to_seconds(job_timeout)
self.keep_result_s = to_seconds(keep_result)
self.poll_delay_s = to_seconds(poll_delay)
self.max_tries = max_tries
self.health_check_interval = to_seconds(health_check_interval)
self.pool = redis_pool
if self.pool is None:
self.redis_settings = redis_settings or RedisSettings()
else:
self.redis_settings = None
self.tasks = []
self.main_task = None
self.loop = asyncio.get_event_loop()
self.ctx = ctx or {}
max_timeout = max(f.timeout_s or self.job_timeout_s for f in self.functions.values())
self.in_progress_timeout_s = max_timeout + 10
self.jobs_complete = 0
self.jobs_retried = 0
self.jobs_failed = 0
self._last_health_check = 0
self._last_health_check_log = None
self._add_signal_handler(signal.SIGINT, self.handle_sig)
self._add_signal_handler(signal.SIGTERM, self.handle_sig)
self.on_stop = None
def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close())
async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task
async def run_check(self) -> int:
"""
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs`
if any jobs have failed.
:return: number of completed jobs
"""
await self.async_run()
if self.jobs_failed:
failed_job_results = [r for r in await self.pool.all_job_results() if not r.success]
raise FailedJobs(self.jobs_failed, failed_job_results)
else:
return self.jobs_complete
async def main(self):
if self.pool is None:
self.pool = await create_pool(self.redis_settings)
logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))
await log_redis_info(self.pool, logger.info)
self.ctx['redis'] = self.pool
if self.on_startup:
await self.on_startup(self.ctx)
async for _ in poll(self.poll_delay_s): # noqa F841
async with self.sem: # don't both with zrangebyscore until we have "space" to run the jobs
now = timestamp_ms()
job_ids = await self.pool.zrangebyscore(queue_name, max=now)
await self.run_jobs(job_ids)
# required to make sure errors in run_job get propagated
for t in self.tasks:
if t.done():
self.tasks.remove(t)
t.result()
await self.heart_beat()
if self.burst:
queued_jobs = await self.pool.zcard(queue_name)
if queued_jobs == 0:
return
async def run_jobs(self, job_ids):
for job_id in job_ids:
await self.sem.acquire()
in_progress_key = in_progress_key_prefix + job_id
with await self.pool as conn:
_, _, ongoing_exists, score = await asyncio.gather(
conn.unwatch(),
conn.watch(in_progress_key),
conn.exists(in_progress_key),
conn.zscore(queue_name, job_id),
)
if ongoing_exists or not score:
# job already started elsewhere, or already finished and removed from queue
self.sem.release()
continue
tr = conn.multi_exec()
tr.setex(in_progress_key, self.in_progress_timeout_s, b'1')
try:
await tr.execute()
except MultiExecError:
# job already started elsewhere since we got 'existing'
self.sem.release()
else:
self.tasks.append(self.loop.create_task(self.run_job(job_id, score)))
async def run_job(self, job_id, score): # noqa: C901
v, job_try, _ = await asyncio.gather(
self.pool.get(job_key_prefix + job_id, encoding=None),
self.pool.incr(retry_key_prefix + job_id),
self.pool.expire(retry_key_prefix + job_id, 88400),
)
if not v:
logger.warning('job %s expired', job_id)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
function_name, args, kwargs, enqueue_job_try, enqueue_time_ms = unpickle_job_raw(v)
try:
function: Union[Function, CronJob] = self.functions[function_name]
except KeyError:
logger.warning('job %s, function %r not found', job_id, function_name)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
if hasattr(function, 'next_run'):
# cron_job
ref = function_name
else:
ref = f'{job_id}:{function_name}'
if enqueue_job_try and enqueue_job_try > job_try:
job_try = enqueue_job_try
await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))
max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
t = (timestamp_ms() - enqueue_time_ms) / 1000
logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
result = no_result
exc_extra = None
finish = False
timeout_s = self.job_timeout_s if function.timeout_s is None else function.timeout_s
incr_score = None
job_ctx = {
'job_id': job_id,
'job_try': job_try,
'enqueue_time': ms_to_datetime(enqueue_time_ms),
'score': score,
}
ctx = {**self.ctx, **job_ctx}
start_ms = timestamp_ms()
success = False
try:
s = args_to_string(args, kwargs)
extra = f' try={job_try}' if job_try > 1 else ''
if (start_ms - score) > 1200:
extra += f' delayed={(start_ms - score) / 1000:0.2f}s'
logger.info('%6.2fs → %s(%s)%s', (start_ms - enqueue_time_ms) / 1000, ref, s, extra)
# run repr(result) and extra inside try/except as they can raise exceptions
try:
async with async_timeout.timeout(timeout_s):
result = await function.coroutine(ctx, *args, **kwargs)
except Exception as e:
exc_extra = getattr(e, 'extra', None)
if callable(exc_extra):
exc_extra = exc_extra()
raise
else:
result_str = '' if result is None else truncate(repr(result))
except Exception as e:
finished_ms = timestamp_ms()
t = (finished_ms - start_ms) / 1000
if isinstance(e, Retry):
incr_score = e.defer_score
logger.info('%6.2fs ↻ %s retrying job in %0.2fs', t, ref, (e.defer_score or 0) / 1000)
if e.defer_score:
incr_score = e.defer_score + (timestamp_ms() - score)
self.jobs_retried += 1
elif isinstance(e, asyncio.CancelledError):
logger.info('%6.2fs ↻ %s cancelled, will be run again', t, ref)
self.jobs_retried += 1
else:
logger.exception(
'%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
)
result = e
finish = True
self.jobs_failed += 1
else:
success = True
finished_ms = timestamp_ms()
logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
finish = True
self.jobs_complete += 1
result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
result_data = pickle_result(
function_name, args, kwargs, job_try, enqueue_time_ms, success, result, start_ms, finished_ms, ref
)
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
async def finish_job(self, job_id, finish, result_data, result_timeout_s, incr_score):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
delete_keys = [in_progress_key_prefix + job_id]
if finish:
if result_data:
tr.setex(result_key_prefix + job_id, result_timeout_s, result_data)
delete_keys += [retry_key_prefix + job_id, job_key_prefix + job_id]
tr.zrem(queue_name, job_id)
elif incr_score:
tr.zincrby(queue_name, incr_score, job_id)
tr.delete(*delete_keys)
await tr.execute()
self.sem.release()
async def abort_job(self, job_id):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
tr.delete(retry_key_prefix + job_id, in_progress_key_prefix + job_id, job_key_prefix + job_id)
tr.zrem(queue_name, job_id)
await tr.execute()
async def heart_beat(self):
await self.record_health()
await self.run_cron()
async def run_cron(self):
n = datetime.now()
job_futures = set()
for cron_job in self.cron_jobs:
if cron_job.next_run is None:
if cron_job.run_at_startup:
cron_job.next_run = n
else:
cron_job.set_next(n)
if n >= cron_job.next_run:
job_id = f'{cron_job.name}:{to_unix_ms(cron_job.next_run)}' if cron_job.unique else None
job_futures.add(self.pool.enqueue_job(cron_job.name, _job_id=job_id))
cron_job.set_next(n)
job_futures and await asyncio.gather(*job_futures)
async def record_health(self):
now_ts = time()
if (now_ts - self._last_health_check) < self.health_check_interval:
return
self._last_health_check = now_ts
pending_tasks = sum(not t.done() for t in self.tasks)
queued = await self.pool.zcard(queue_name)
info = (
f'{datetime.now():%b-%d %H:%M:%S} j_complete={self.jobs_complete} j_failed={self.jobs_failed} '
f'j_retried={self.jobs_retried} j_ongoing={pending_tasks} queued={queued}'
)
await self.pool.setex(health_check_key, self.health_check_interval + 1, info.encode())
log_suffix = info[info.index('j_complete=') :]
if self._last_health_check_log and log_suffix != self._last_health_check_log:
logger.info('recording health: %s', info)
self._last_health_check_log = log_suffix
elif not self._last_health_check_log:
self._last_health_check_log = log_suffix
def _add_signal_handler(self, signal, handler):
self.loop.add_signal_handler(signal, partial(handler, signal))
def handle_sig(self, signum):
sig = Signals(signum)
logger.info(
'shutdown on %s ◆ %d jobs complete ◆ %d failed ◆ %d retries ◆ %d ongoing to cancel',
sig.name,
self.jobs_complete,
self.jobs_failed,
self.jobs_retried,
len(self.tasks),
)
for t in self.tasks:
if not t.done():
t.cancel()
self.main_task and self.main_task.cancel()
self.on_stop and self.on_stop(sig)
async def close(self):
if not self.pool:
return
await asyncio.gather(*self.tasks)
await self.pool.delete(health_check_key)
if self.on_shutdown:
await self.on_shutdown(self.ctx)
self.pool.close()
await self.pool.wait_closed()
self.pool = None
def __repr__(self):
return (
f'<Worker j_complete={self.jobs_complete} j_failed={self.jobs_failed} j_retried={self.jobs_retried} '
f'j_ongoing={sum(not t.done() for t in self.tasks)}>'
)
def get_kwargs(settings_cls):
worker_args = set(inspect.signature(Worker).parameters.keys())
d = settings_cls if isinstance(settings_cls, dict) else settings_cls.__dict__
return {k: v for k, v in d.items() if k in worker_args}
def create_worker(settings_cls, **kwargs) -> Worker:
return Worker(**{**get_kwargs(settings_cls), **kwargs})
def run_worker(settings_cls, **kwargs) -> Worker:
worker = create_worker(settings_cls, **kwargs)
worker.run()
return worker
async def async_check_health(redis_settings: Optional[RedisSettings]):
redis_settings = redis_settings or RedisSettings()
redis: ArqRedis = await create_pool(redis_settings)
data = await redis.get(health_check_key)
if not data:
logger.warning('Health check failed: no health check sentinel value found')
r = 1
else:
logger.info('Health check successful: %s', data)
r = 0
redis.close()
await redis.wait_closed()
return r
def check_health(settings_cls) -> int:
"""
Run a health check on the worker and return the appropriate exit code.
:return: 0 if successful, 1 if not
"""
cls_kwargs = get_kwargs(settings_cls)
loop = asyncio.get_event_loop()
return loop.run_until_complete(async_check_health(cls_kwargs.get('redis_settings')))
|
samuelcolvin/arq | arq/worker.py | check_health | python | def check_health(settings_cls) -> int:
cls_kwargs = get_kwargs(settings_cls)
loop = asyncio.get_event_loop()
return loop.run_until_complete(async_check_health(cls_kwargs.get('redis_settings'))) | Run a health check on the worker and return the appropriate exit code.
:return: 0 if successful, 1 if not | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L518-L525 | [
"def get_kwargs(settings_cls):\n worker_args = set(inspect.signature(Worker).parameters.keys())\n d = settings_cls if isinstance(settings_cls, dict) else settings_cls.__dict__\n return {k: v for k, v in d.items() if k in worker_args}\n",
"async def async_check_health(redis_settings: Optional[RedisSettings]):\n redis_settings = redis_settings or RedisSettings()\n redis: ArqRedis = await create_pool(redis_settings)\n data = await redis.get(health_check_key)\n if not data:\n logger.warning('Health check failed: no health check sentinel value found')\n r = 1\n else:\n logger.info('Health check successful: %s', data)\n r = 0\n redis.close()\n await redis.wait_closed()\n return r\n"
] | import asyncio
import inspect
import logging
import signal
from dataclasses import dataclass
from datetime import datetime
from functools import partial
from signal import Signals
from time import time
from typing import Awaitable, Callable, Dict, List, Optional, Sequence, Union
import async_timeout
from aioredis import MultiExecError
from pydantic.utils import import_string
from arq.cron import CronJob
from arq.jobs import pickle_result, unpickle_job_raw
from .connections import ArqRedis, RedisSettings, create_pool, log_redis_info
from .constants import (
health_check_key,
in_progress_key_prefix,
job_key_prefix,
queue_name,
result_key_prefix,
retry_key_prefix,
)
from .utils import (
SecondsTimedelta,
args_to_string,
ms_to_datetime,
poll,
timestamp_ms,
to_ms,
to_seconds,
to_unix_ms,
truncate,
)
logger = logging.getLogger('arq.worker')
no_result = object()
@dataclass
class Function:
name: str
coroutine: Callable
timeout_s: Optional[float]
keep_result_s: Optional[float]
max_tries: Optional[int]
def func(
coroutine: Union[str, Function, Callable],
*,
name: Optional[str] = None,
keep_result: Optional[SecondsTimedelta] = None,
timeout: Optional[SecondsTimedelta] = None,
max_tries: Optional[int] = None,
) -> Function:
"""
Wrapper for a job function which lets you configure more settings.
:param coroutine: coroutine function to call, can be a string to import
:param name: name for function, if None, ``coroutine.__qualname__`` is used
:param keep_result: duration to keep the result for, if 0 the result is not kept
:param timeout: maximum time the job should take
:param max_tries: maximum number of tries allowed for the function, use 1 to prevent retrying
"""
if isinstance(coroutine, Function):
return coroutine
if isinstance(coroutine, str):
name = name or coroutine
coroutine = import_string(coroutine)
assert asyncio.iscoroutinefunction(coroutine), f'{coroutine} is not a coroutine function'
timeout = to_seconds(timeout)
keep_result = to_seconds(keep_result)
return Function(name or coroutine.__qualname__, coroutine, timeout, keep_result, max_tries)
class Retry(RuntimeError):
"""
Special exception to retry the job (if ``max_retries`` hasn't been reached).
:param defer: duration to wait before rerunning the job
"""
__slots__ = ('defer_score',)
def __init__(self, defer: Optional[SecondsTimedelta] = None):
self.defer_score = to_ms(defer)
def __repr__(self):
return f'<Retry defer {(self.defer_score or 0) / 1000:0.2f}s>'
def __str__(self):
return repr(self)
class FailedJobs(RuntimeError):
def __init__(self, count, job_results):
self.count = count
self.job_results = job_results
def __str__(self):
if self.count == 1 and self.job_results:
exc = self.job_results[0].result
return f'1 job failed "{exc.__class__.__name__}: {exc}"'
else:
return f'{self.count} jobs failed'
def __repr__(self):
return f'<{str(self)}>'
class Worker:
"""
Main class for running jobs.
:param functions: list of functions to register, can either be raw coroutine functions or the
result of :func:`arq.worker.func`.
:param cron_jobs: list of cron jobs to run, use :func:`arq.cron.cron` to create them
:param redis_settings: settings for creating a redis connection
:param redis_pool: existing redis pool, generally None
:param burst: whether to stop the worker once all jobs have been run
:param on_startup: coroutine function to run at startup
:param on_shutdown: coroutine function to run at shutdown
:param max_jobs: maximum number of jobs to run at a time
:param job_timeout: default job timeout (max run time)
:param keep_result: default duration to keep job results for
:param poll_delay: duration between polling the queue for new jobs
:param max_tries: default maximum number of times to retry a job
:param health_check_interval: how often to set the health check key
"""
def __init__(
self,
functions: Sequence[Function] = (),
*,
cron_jobs: Optional[Sequence[CronJob]] = None,
redis_settings: RedisSettings = None,
redis_pool: ArqRedis = None,
burst: bool = False,
on_startup: Callable[[Dict], Awaitable] = None,
on_shutdown: Callable[[Dict], Awaitable] = None,
max_jobs: int = 10,
job_timeout: SecondsTimedelta = 300,
keep_result: SecondsTimedelta = 3600,
poll_delay: SecondsTimedelta = 0.5,
max_tries: int = 5,
health_check_interval: SecondsTimedelta = 3600,
ctx: Optional[Dict] = None,
):
self.functions: Dict[str, Union[Function, CronJob]] = {f.name: f for f in map(func, functions)}
self.cron_jobs: List[CronJob] = []
if cron_jobs:
assert all(isinstance(cj, CronJob) for cj in cron_jobs), 'cron_jobs, must be instances of CronJob'
self.cron_jobs = cron_jobs
self.functions.update({cj.name: cj for cj in self.cron_jobs})
assert len(self.functions) > 0, 'at least one function or cron_job must be registered'
self.burst = burst
self.on_startup = on_startup
self.on_shutdown = on_shutdown
self.sem = asyncio.BoundedSemaphore(max_jobs)
self.job_timeout_s = to_seconds(job_timeout)
self.keep_result_s = to_seconds(keep_result)
self.poll_delay_s = to_seconds(poll_delay)
self.max_tries = max_tries
self.health_check_interval = to_seconds(health_check_interval)
self.pool = redis_pool
if self.pool is None:
self.redis_settings = redis_settings or RedisSettings()
else:
self.redis_settings = None
self.tasks = []
self.main_task = None
self.loop = asyncio.get_event_loop()
self.ctx = ctx or {}
max_timeout = max(f.timeout_s or self.job_timeout_s for f in self.functions.values())
self.in_progress_timeout_s = max_timeout + 10
self.jobs_complete = 0
self.jobs_retried = 0
self.jobs_failed = 0
self._last_health_check = 0
self._last_health_check_log = None
self._add_signal_handler(signal.SIGINT, self.handle_sig)
self._add_signal_handler(signal.SIGTERM, self.handle_sig)
self.on_stop = None
def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close())
async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task
async def run_check(self) -> int:
"""
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs`
if any jobs have failed.
:return: number of completed jobs
"""
await self.async_run()
if self.jobs_failed:
failed_job_results = [r for r in await self.pool.all_job_results() if not r.success]
raise FailedJobs(self.jobs_failed, failed_job_results)
else:
return self.jobs_complete
async def main(self):
if self.pool is None:
self.pool = await create_pool(self.redis_settings)
logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))
await log_redis_info(self.pool, logger.info)
self.ctx['redis'] = self.pool
if self.on_startup:
await self.on_startup(self.ctx)
async for _ in poll(self.poll_delay_s): # noqa F841
async with self.sem: # don't both with zrangebyscore until we have "space" to run the jobs
now = timestamp_ms()
job_ids = await self.pool.zrangebyscore(queue_name, max=now)
await self.run_jobs(job_ids)
# required to make sure errors in run_job get propagated
for t in self.tasks:
if t.done():
self.tasks.remove(t)
t.result()
await self.heart_beat()
if self.burst:
queued_jobs = await self.pool.zcard(queue_name)
if queued_jobs == 0:
return
async def run_jobs(self, job_ids):
for job_id in job_ids:
await self.sem.acquire()
in_progress_key = in_progress_key_prefix + job_id
with await self.pool as conn:
_, _, ongoing_exists, score = await asyncio.gather(
conn.unwatch(),
conn.watch(in_progress_key),
conn.exists(in_progress_key),
conn.zscore(queue_name, job_id),
)
if ongoing_exists or not score:
# job already started elsewhere, or already finished and removed from queue
self.sem.release()
continue
tr = conn.multi_exec()
tr.setex(in_progress_key, self.in_progress_timeout_s, b'1')
try:
await tr.execute()
except MultiExecError:
# job already started elsewhere since we got 'existing'
self.sem.release()
else:
self.tasks.append(self.loop.create_task(self.run_job(job_id, score)))
async def run_job(self, job_id, score): # noqa: C901
v, job_try, _ = await asyncio.gather(
self.pool.get(job_key_prefix + job_id, encoding=None),
self.pool.incr(retry_key_prefix + job_id),
self.pool.expire(retry_key_prefix + job_id, 88400),
)
if not v:
logger.warning('job %s expired', job_id)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
function_name, args, kwargs, enqueue_job_try, enqueue_time_ms = unpickle_job_raw(v)
try:
function: Union[Function, CronJob] = self.functions[function_name]
except KeyError:
logger.warning('job %s, function %r not found', job_id, function_name)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
if hasattr(function, 'next_run'):
# cron_job
ref = function_name
else:
ref = f'{job_id}:{function_name}'
if enqueue_job_try and enqueue_job_try > job_try:
job_try = enqueue_job_try
await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))
max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
t = (timestamp_ms() - enqueue_time_ms) / 1000
logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
result = no_result
exc_extra = None
finish = False
timeout_s = self.job_timeout_s if function.timeout_s is None else function.timeout_s
incr_score = None
job_ctx = {
'job_id': job_id,
'job_try': job_try,
'enqueue_time': ms_to_datetime(enqueue_time_ms),
'score': score,
}
ctx = {**self.ctx, **job_ctx}
start_ms = timestamp_ms()
success = False
try:
s = args_to_string(args, kwargs)
extra = f' try={job_try}' if job_try > 1 else ''
if (start_ms - score) > 1200:
extra += f' delayed={(start_ms - score) / 1000:0.2f}s'
logger.info('%6.2fs → %s(%s)%s', (start_ms - enqueue_time_ms) / 1000, ref, s, extra)
# run repr(result) and extra inside try/except as they can raise exceptions
try:
async with async_timeout.timeout(timeout_s):
result = await function.coroutine(ctx, *args, **kwargs)
except Exception as e:
exc_extra = getattr(e, 'extra', None)
if callable(exc_extra):
exc_extra = exc_extra()
raise
else:
result_str = '' if result is None else truncate(repr(result))
except Exception as e:
finished_ms = timestamp_ms()
t = (finished_ms - start_ms) / 1000
if isinstance(e, Retry):
incr_score = e.defer_score
logger.info('%6.2fs ↻ %s retrying job in %0.2fs', t, ref, (e.defer_score or 0) / 1000)
if e.defer_score:
incr_score = e.defer_score + (timestamp_ms() - score)
self.jobs_retried += 1
elif isinstance(e, asyncio.CancelledError):
logger.info('%6.2fs ↻ %s cancelled, will be run again', t, ref)
self.jobs_retried += 1
else:
logger.exception(
'%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
)
result = e
finish = True
self.jobs_failed += 1
else:
success = True
finished_ms = timestamp_ms()
logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
finish = True
self.jobs_complete += 1
result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
result_data = pickle_result(
function_name, args, kwargs, job_try, enqueue_time_ms, success, result, start_ms, finished_ms, ref
)
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
async def finish_job(self, job_id, finish, result_data, result_timeout_s, incr_score):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
delete_keys = [in_progress_key_prefix + job_id]
if finish:
if result_data:
tr.setex(result_key_prefix + job_id, result_timeout_s, result_data)
delete_keys += [retry_key_prefix + job_id, job_key_prefix + job_id]
tr.zrem(queue_name, job_id)
elif incr_score:
tr.zincrby(queue_name, incr_score, job_id)
tr.delete(*delete_keys)
await tr.execute()
self.sem.release()
async def abort_job(self, job_id):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
tr.delete(retry_key_prefix + job_id, in_progress_key_prefix + job_id, job_key_prefix + job_id)
tr.zrem(queue_name, job_id)
await tr.execute()
async def heart_beat(self):
await self.record_health()
await self.run_cron()
async def run_cron(self):
n = datetime.now()
job_futures = set()
for cron_job in self.cron_jobs:
if cron_job.next_run is None:
if cron_job.run_at_startup:
cron_job.next_run = n
else:
cron_job.set_next(n)
if n >= cron_job.next_run:
job_id = f'{cron_job.name}:{to_unix_ms(cron_job.next_run)}' if cron_job.unique else None
job_futures.add(self.pool.enqueue_job(cron_job.name, _job_id=job_id))
cron_job.set_next(n)
job_futures and await asyncio.gather(*job_futures)
async def record_health(self):
now_ts = time()
if (now_ts - self._last_health_check) < self.health_check_interval:
return
self._last_health_check = now_ts
pending_tasks = sum(not t.done() for t in self.tasks)
queued = await self.pool.zcard(queue_name)
info = (
f'{datetime.now():%b-%d %H:%M:%S} j_complete={self.jobs_complete} j_failed={self.jobs_failed} '
f'j_retried={self.jobs_retried} j_ongoing={pending_tasks} queued={queued}'
)
await self.pool.setex(health_check_key, self.health_check_interval + 1, info.encode())
log_suffix = info[info.index('j_complete=') :]
if self._last_health_check_log and log_suffix != self._last_health_check_log:
logger.info('recording health: %s', info)
self._last_health_check_log = log_suffix
elif not self._last_health_check_log:
self._last_health_check_log = log_suffix
def _add_signal_handler(self, signal, handler):
self.loop.add_signal_handler(signal, partial(handler, signal))
def handle_sig(self, signum):
sig = Signals(signum)
logger.info(
'shutdown on %s ◆ %d jobs complete ◆ %d failed ◆ %d retries ◆ %d ongoing to cancel',
sig.name,
self.jobs_complete,
self.jobs_failed,
self.jobs_retried,
len(self.tasks),
)
for t in self.tasks:
if not t.done():
t.cancel()
self.main_task and self.main_task.cancel()
self.on_stop and self.on_stop(sig)
async def close(self):
if not self.pool:
return
await asyncio.gather(*self.tasks)
await self.pool.delete(health_check_key)
if self.on_shutdown:
await self.on_shutdown(self.ctx)
self.pool.close()
await self.pool.wait_closed()
self.pool = None
def __repr__(self):
return (
f'<Worker j_complete={self.jobs_complete} j_failed={self.jobs_failed} j_retried={self.jobs_retried} '
f'j_ongoing={sum(not t.done() for t in self.tasks)}>'
)
def get_kwargs(settings_cls):
worker_args = set(inspect.signature(Worker).parameters.keys())
d = settings_cls if isinstance(settings_cls, dict) else settings_cls.__dict__
return {k: v for k, v in d.items() if k in worker_args}
def create_worker(settings_cls, **kwargs) -> Worker:
return Worker(**{**get_kwargs(settings_cls), **kwargs})
def run_worker(settings_cls, **kwargs) -> Worker:
worker = create_worker(settings_cls, **kwargs)
worker.run()
return worker
async def async_check_health(redis_settings: Optional[RedisSettings]):
redis_settings = redis_settings or RedisSettings()
redis: ArqRedis = await create_pool(redis_settings)
data = await redis.get(health_check_key)
if not data:
logger.warning('Health check failed: no health check sentinel value found')
r = 1
else:
logger.info('Health check successful: %s', data)
r = 0
redis.close()
await redis.wait_closed()
return r
|
samuelcolvin/arq | arq/worker.py | Worker.run | python | def run(self) -> None:
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close()) | Sync function to run the worker, finally closes worker connections. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L193-L204 | [
"async def main(self):\n if self.pool is None:\n self.pool = await create_pool(self.redis_settings)\n\n logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))\n await log_redis_info(self.pool, logger.info)\n self.ctx['redis'] = self.pool\n if self.on_startup:\n await self.on_startup(self.ctx)\n\n async for _ in poll(self.poll_delay_s): # noqa F841\n async with self.sem: # don't both with zrangebyscore until we have \"space\" to run the jobs\n now = timestamp_ms()\n job_ids = await self.pool.zrangebyscore(queue_name, max=now)\n await self.run_jobs(job_ids)\n\n # required to make sure errors in run_job get propagated\n for t in self.tasks:\n if t.done():\n self.tasks.remove(t)\n t.result()\n\n await self.heart_beat()\n\n if self.burst:\n queued_jobs = await self.pool.zcard(queue_name)\n if queued_jobs == 0:\n return\n",
"async def close(self):\n if not self.pool:\n return\n await asyncio.gather(*self.tasks)\n await self.pool.delete(health_check_key)\n if self.on_shutdown:\n await self.on_shutdown(self.ctx)\n self.pool.close()\n await self.pool.wait_closed()\n self.pool = None\n"
] | class Worker:
"""
Main class for running jobs.
:param functions: list of functions to register, can either be raw coroutine functions or the
result of :func:`arq.worker.func`.
:param cron_jobs: list of cron jobs to run, use :func:`arq.cron.cron` to create them
:param redis_settings: settings for creating a redis connection
:param redis_pool: existing redis pool, generally None
:param burst: whether to stop the worker once all jobs have been run
:param on_startup: coroutine function to run at startup
:param on_shutdown: coroutine function to run at shutdown
:param max_jobs: maximum number of jobs to run at a time
:param job_timeout: default job timeout (max run time)
:param keep_result: default duration to keep job results for
:param poll_delay: duration between polling the queue for new jobs
:param max_tries: default maximum number of times to retry a job
:param health_check_interval: how often to set the health check key
"""
def __init__(
self,
functions: Sequence[Function] = (),
*,
cron_jobs: Optional[Sequence[CronJob]] = None,
redis_settings: RedisSettings = None,
redis_pool: ArqRedis = None,
burst: bool = False,
on_startup: Callable[[Dict], Awaitable] = None,
on_shutdown: Callable[[Dict], Awaitable] = None,
max_jobs: int = 10,
job_timeout: SecondsTimedelta = 300,
keep_result: SecondsTimedelta = 3600,
poll_delay: SecondsTimedelta = 0.5,
max_tries: int = 5,
health_check_interval: SecondsTimedelta = 3600,
ctx: Optional[Dict] = None,
):
self.functions: Dict[str, Union[Function, CronJob]] = {f.name: f for f in map(func, functions)}
self.cron_jobs: List[CronJob] = []
if cron_jobs:
assert all(isinstance(cj, CronJob) for cj in cron_jobs), 'cron_jobs, must be instances of CronJob'
self.cron_jobs = cron_jobs
self.functions.update({cj.name: cj for cj in self.cron_jobs})
assert len(self.functions) > 0, 'at least one function or cron_job must be registered'
self.burst = burst
self.on_startup = on_startup
self.on_shutdown = on_shutdown
self.sem = asyncio.BoundedSemaphore(max_jobs)
self.job_timeout_s = to_seconds(job_timeout)
self.keep_result_s = to_seconds(keep_result)
self.poll_delay_s = to_seconds(poll_delay)
self.max_tries = max_tries
self.health_check_interval = to_seconds(health_check_interval)
self.pool = redis_pool
if self.pool is None:
self.redis_settings = redis_settings or RedisSettings()
else:
self.redis_settings = None
self.tasks = []
self.main_task = None
self.loop = asyncio.get_event_loop()
self.ctx = ctx or {}
max_timeout = max(f.timeout_s or self.job_timeout_s for f in self.functions.values())
self.in_progress_timeout_s = max_timeout + 10
self.jobs_complete = 0
self.jobs_retried = 0
self.jobs_failed = 0
self._last_health_check = 0
self._last_health_check_log = None
self._add_signal_handler(signal.SIGINT, self.handle_sig)
self._add_signal_handler(signal.SIGTERM, self.handle_sig)
self.on_stop = None
async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task
async def run_check(self) -> int:
"""
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs`
if any jobs have failed.
:return: number of completed jobs
"""
await self.async_run()
if self.jobs_failed:
failed_job_results = [r for r in await self.pool.all_job_results() if not r.success]
raise FailedJobs(self.jobs_failed, failed_job_results)
else:
return self.jobs_complete
async def main(self):
if self.pool is None:
self.pool = await create_pool(self.redis_settings)
logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))
await log_redis_info(self.pool, logger.info)
self.ctx['redis'] = self.pool
if self.on_startup:
await self.on_startup(self.ctx)
async for _ in poll(self.poll_delay_s): # noqa F841
async with self.sem: # don't both with zrangebyscore until we have "space" to run the jobs
now = timestamp_ms()
job_ids = await self.pool.zrangebyscore(queue_name, max=now)
await self.run_jobs(job_ids)
# required to make sure errors in run_job get propagated
for t in self.tasks:
if t.done():
self.tasks.remove(t)
t.result()
await self.heart_beat()
if self.burst:
queued_jobs = await self.pool.zcard(queue_name)
if queued_jobs == 0:
return
async def run_jobs(self, job_ids):
for job_id in job_ids:
await self.sem.acquire()
in_progress_key = in_progress_key_prefix + job_id
with await self.pool as conn:
_, _, ongoing_exists, score = await asyncio.gather(
conn.unwatch(),
conn.watch(in_progress_key),
conn.exists(in_progress_key),
conn.zscore(queue_name, job_id),
)
if ongoing_exists or not score:
# job already started elsewhere, or already finished and removed from queue
self.sem.release()
continue
tr = conn.multi_exec()
tr.setex(in_progress_key, self.in_progress_timeout_s, b'1')
try:
await tr.execute()
except MultiExecError:
# job already started elsewhere since we got 'existing'
self.sem.release()
else:
self.tasks.append(self.loop.create_task(self.run_job(job_id, score)))
async def run_job(self, job_id, score): # noqa: C901
v, job_try, _ = await asyncio.gather(
self.pool.get(job_key_prefix + job_id, encoding=None),
self.pool.incr(retry_key_prefix + job_id),
self.pool.expire(retry_key_prefix + job_id, 88400),
)
if not v:
logger.warning('job %s expired', job_id)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
function_name, args, kwargs, enqueue_job_try, enqueue_time_ms = unpickle_job_raw(v)
try:
function: Union[Function, CronJob] = self.functions[function_name]
except KeyError:
logger.warning('job %s, function %r not found', job_id, function_name)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
if hasattr(function, 'next_run'):
# cron_job
ref = function_name
else:
ref = f'{job_id}:{function_name}'
if enqueue_job_try and enqueue_job_try > job_try:
job_try = enqueue_job_try
await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))
max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
t = (timestamp_ms() - enqueue_time_ms) / 1000
logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
result = no_result
exc_extra = None
finish = False
timeout_s = self.job_timeout_s if function.timeout_s is None else function.timeout_s
incr_score = None
job_ctx = {
'job_id': job_id,
'job_try': job_try,
'enqueue_time': ms_to_datetime(enqueue_time_ms),
'score': score,
}
ctx = {**self.ctx, **job_ctx}
start_ms = timestamp_ms()
success = False
try:
s = args_to_string(args, kwargs)
extra = f' try={job_try}' if job_try > 1 else ''
if (start_ms - score) > 1200:
extra += f' delayed={(start_ms - score) / 1000:0.2f}s'
logger.info('%6.2fs → %s(%s)%s', (start_ms - enqueue_time_ms) / 1000, ref, s, extra)
# run repr(result) and extra inside try/except as they can raise exceptions
try:
async with async_timeout.timeout(timeout_s):
result = await function.coroutine(ctx, *args, **kwargs)
except Exception as e:
exc_extra = getattr(e, 'extra', None)
if callable(exc_extra):
exc_extra = exc_extra()
raise
else:
result_str = '' if result is None else truncate(repr(result))
except Exception as e:
finished_ms = timestamp_ms()
t = (finished_ms - start_ms) / 1000
if isinstance(e, Retry):
incr_score = e.defer_score
logger.info('%6.2fs ↻ %s retrying job in %0.2fs', t, ref, (e.defer_score or 0) / 1000)
if e.defer_score:
incr_score = e.defer_score + (timestamp_ms() - score)
self.jobs_retried += 1
elif isinstance(e, asyncio.CancelledError):
logger.info('%6.2fs ↻ %s cancelled, will be run again', t, ref)
self.jobs_retried += 1
else:
logger.exception(
'%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
)
result = e
finish = True
self.jobs_failed += 1
else:
success = True
finished_ms = timestamp_ms()
logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
finish = True
self.jobs_complete += 1
result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
result_data = pickle_result(
function_name, args, kwargs, job_try, enqueue_time_ms, success, result, start_ms, finished_ms, ref
)
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
async def finish_job(self, job_id, finish, result_data, result_timeout_s, incr_score):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
delete_keys = [in_progress_key_prefix + job_id]
if finish:
if result_data:
tr.setex(result_key_prefix + job_id, result_timeout_s, result_data)
delete_keys += [retry_key_prefix + job_id, job_key_prefix + job_id]
tr.zrem(queue_name, job_id)
elif incr_score:
tr.zincrby(queue_name, incr_score, job_id)
tr.delete(*delete_keys)
await tr.execute()
self.sem.release()
async def abort_job(self, job_id):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
tr.delete(retry_key_prefix + job_id, in_progress_key_prefix + job_id, job_key_prefix + job_id)
tr.zrem(queue_name, job_id)
await tr.execute()
async def heart_beat(self):
await self.record_health()
await self.run_cron()
async def run_cron(self):
n = datetime.now()
job_futures = set()
for cron_job in self.cron_jobs:
if cron_job.next_run is None:
if cron_job.run_at_startup:
cron_job.next_run = n
else:
cron_job.set_next(n)
if n >= cron_job.next_run:
job_id = f'{cron_job.name}:{to_unix_ms(cron_job.next_run)}' if cron_job.unique else None
job_futures.add(self.pool.enqueue_job(cron_job.name, _job_id=job_id))
cron_job.set_next(n)
job_futures and await asyncio.gather(*job_futures)
async def record_health(self):
now_ts = time()
if (now_ts - self._last_health_check) < self.health_check_interval:
return
self._last_health_check = now_ts
pending_tasks = sum(not t.done() for t in self.tasks)
queued = await self.pool.zcard(queue_name)
info = (
f'{datetime.now():%b-%d %H:%M:%S} j_complete={self.jobs_complete} j_failed={self.jobs_failed} '
f'j_retried={self.jobs_retried} j_ongoing={pending_tasks} queued={queued}'
)
await self.pool.setex(health_check_key, self.health_check_interval + 1, info.encode())
log_suffix = info[info.index('j_complete=') :]
if self._last_health_check_log and log_suffix != self._last_health_check_log:
logger.info('recording health: %s', info)
self._last_health_check_log = log_suffix
elif not self._last_health_check_log:
self._last_health_check_log = log_suffix
def _add_signal_handler(self, signal, handler):
self.loop.add_signal_handler(signal, partial(handler, signal))
def handle_sig(self, signum):
sig = Signals(signum)
logger.info(
'shutdown on %s ◆ %d jobs complete ◆ %d failed ◆ %d retries ◆ %d ongoing to cancel',
sig.name,
self.jobs_complete,
self.jobs_failed,
self.jobs_retried,
len(self.tasks),
)
for t in self.tasks:
if not t.done():
t.cancel()
self.main_task and self.main_task.cancel()
self.on_stop and self.on_stop(sig)
async def close(self):
if not self.pool:
return
await asyncio.gather(*self.tasks)
await self.pool.delete(health_check_key)
if self.on_shutdown:
await self.on_shutdown(self.ctx)
self.pool.close()
await self.pool.wait_closed()
self.pool = None
def __repr__(self):
return (
f'<Worker j_complete={self.jobs_complete} j_failed={self.jobs_failed} j_retried={self.jobs_retried} '
f'j_ongoing={sum(not t.done() for t in self.tasks)}>'
)
|
samuelcolvin/arq | arq/worker.py | Worker.async_run | python | async def async_run(self) -> None:
self.main_task = self.loop.create_task(self.main())
await self.main_task | Asynchronously run the worker, does not close connections. Useful when testing. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L206-L211 | [
"async def main(self):\n if self.pool is None:\n self.pool = await create_pool(self.redis_settings)\n\n logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))\n await log_redis_info(self.pool, logger.info)\n self.ctx['redis'] = self.pool\n if self.on_startup:\n await self.on_startup(self.ctx)\n\n async for _ in poll(self.poll_delay_s): # noqa F841\n async with self.sem: # don't both with zrangebyscore until we have \"space\" to run the jobs\n now = timestamp_ms()\n job_ids = await self.pool.zrangebyscore(queue_name, max=now)\n await self.run_jobs(job_ids)\n\n # required to make sure errors in run_job get propagated\n for t in self.tasks:\n if t.done():\n self.tasks.remove(t)\n t.result()\n\n await self.heart_beat()\n\n if self.burst:\n queued_jobs = await self.pool.zcard(queue_name)\n if queued_jobs == 0:\n return\n"
] | class Worker:
"""
Main class for running jobs.
:param functions: list of functions to register, can either be raw coroutine functions or the
result of :func:`arq.worker.func`.
:param cron_jobs: list of cron jobs to run, use :func:`arq.cron.cron` to create them
:param redis_settings: settings for creating a redis connection
:param redis_pool: existing redis pool, generally None
:param burst: whether to stop the worker once all jobs have been run
:param on_startup: coroutine function to run at startup
:param on_shutdown: coroutine function to run at shutdown
:param max_jobs: maximum number of jobs to run at a time
:param job_timeout: default job timeout (max run time)
:param keep_result: default duration to keep job results for
:param poll_delay: duration between polling the queue for new jobs
:param max_tries: default maximum number of times to retry a job
:param health_check_interval: how often to set the health check key
"""
def __init__(
self,
functions: Sequence[Function] = (),
*,
cron_jobs: Optional[Sequence[CronJob]] = None,
redis_settings: RedisSettings = None,
redis_pool: ArqRedis = None,
burst: bool = False,
on_startup: Callable[[Dict], Awaitable] = None,
on_shutdown: Callable[[Dict], Awaitable] = None,
max_jobs: int = 10,
job_timeout: SecondsTimedelta = 300,
keep_result: SecondsTimedelta = 3600,
poll_delay: SecondsTimedelta = 0.5,
max_tries: int = 5,
health_check_interval: SecondsTimedelta = 3600,
ctx: Optional[Dict] = None,
):
self.functions: Dict[str, Union[Function, CronJob]] = {f.name: f for f in map(func, functions)}
self.cron_jobs: List[CronJob] = []
if cron_jobs:
assert all(isinstance(cj, CronJob) for cj in cron_jobs), 'cron_jobs, must be instances of CronJob'
self.cron_jobs = cron_jobs
self.functions.update({cj.name: cj for cj in self.cron_jobs})
assert len(self.functions) > 0, 'at least one function or cron_job must be registered'
self.burst = burst
self.on_startup = on_startup
self.on_shutdown = on_shutdown
self.sem = asyncio.BoundedSemaphore(max_jobs)
self.job_timeout_s = to_seconds(job_timeout)
self.keep_result_s = to_seconds(keep_result)
self.poll_delay_s = to_seconds(poll_delay)
self.max_tries = max_tries
self.health_check_interval = to_seconds(health_check_interval)
self.pool = redis_pool
if self.pool is None:
self.redis_settings = redis_settings or RedisSettings()
else:
self.redis_settings = None
self.tasks = []
self.main_task = None
self.loop = asyncio.get_event_loop()
self.ctx = ctx or {}
max_timeout = max(f.timeout_s or self.job_timeout_s for f in self.functions.values())
self.in_progress_timeout_s = max_timeout + 10
self.jobs_complete = 0
self.jobs_retried = 0
self.jobs_failed = 0
self._last_health_check = 0
self._last_health_check_log = None
self._add_signal_handler(signal.SIGINT, self.handle_sig)
self._add_signal_handler(signal.SIGTERM, self.handle_sig)
self.on_stop = None
def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close())
async def run_check(self) -> int:
"""
Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs`
if any jobs have failed.
:return: number of completed jobs
"""
await self.async_run()
if self.jobs_failed:
failed_job_results = [r for r in await self.pool.all_job_results() if not r.success]
raise FailedJobs(self.jobs_failed, failed_job_results)
else:
return self.jobs_complete
async def main(self):
if self.pool is None:
self.pool = await create_pool(self.redis_settings)
logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))
await log_redis_info(self.pool, logger.info)
self.ctx['redis'] = self.pool
if self.on_startup:
await self.on_startup(self.ctx)
async for _ in poll(self.poll_delay_s): # noqa F841
async with self.sem: # don't both with zrangebyscore until we have "space" to run the jobs
now = timestamp_ms()
job_ids = await self.pool.zrangebyscore(queue_name, max=now)
await self.run_jobs(job_ids)
# required to make sure errors in run_job get propagated
for t in self.tasks:
if t.done():
self.tasks.remove(t)
t.result()
await self.heart_beat()
if self.burst:
queued_jobs = await self.pool.zcard(queue_name)
if queued_jobs == 0:
return
async def run_jobs(self, job_ids):
for job_id in job_ids:
await self.sem.acquire()
in_progress_key = in_progress_key_prefix + job_id
with await self.pool as conn:
_, _, ongoing_exists, score = await asyncio.gather(
conn.unwatch(),
conn.watch(in_progress_key),
conn.exists(in_progress_key),
conn.zscore(queue_name, job_id),
)
if ongoing_exists or not score:
# job already started elsewhere, or already finished and removed from queue
self.sem.release()
continue
tr = conn.multi_exec()
tr.setex(in_progress_key, self.in_progress_timeout_s, b'1')
try:
await tr.execute()
except MultiExecError:
# job already started elsewhere since we got 'existing'
self.sem.release()
else:
self.tasks.append(self.loop.create_task(self.run_job(job_id, score)))
async def run_job(self, job_id, score): # noqa: C901
v, job_try, _ = await asyncio.gather(
self.pool.get(job_key_prefix + job_id, encoding=None),
self.pool.incr(retry_key_prefix + job_id),
self.pool.expire(retry_key_prefix + job_id, 88400),
)
if not v:
logger.warning('job %s expired', job_id)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
function_name, args, kwargs, enqueue_job_try, enqueue_time_ms = unpickle_job_raw(v)
try:
function: Union[Function, CronJob] = self.functions[function_name]
except KeyError:
logger.warning('job %s, function %r not found', job_id, function_name)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
if hasattr(function, 'next_run'):
# cron_job
ref = function_name
else:
ref = f'{job_id}:{function_name}'
if enqueue_job_try and enqueue_job_try > job_try:
job_try = enqueue_job_try
await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))
max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
t = (timestamp_ms() - enqueue_time_ms) / 1000
logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
result = no_result
exc_extra = None
finish = False
timeout_s = self.job_timeout_s if function.timeout_s is None else function.timeout_s
incr_score = None
job_ctx = {
'job_id': job_id,
'job_try': job_try,
'enqueue_time': ms_to_datetime(enqueue_time_ms),
'score': score,
}
ctx = {**self.ctx, **job_ctx}
start_ms = timestamp_ms()
success = False
try:
s = args_to_string(args, kwargs)
extra = f' try={job_try}' if job_try > 1 else ''
if (start_ms - score) > 1200:
extra += f' delayed={(start_ms - score) / 1000:0.2f}s'
logger.info('%6.2fs → %s(%s)%s', (start_ms - enqueue_time_ms) / 1000, ref, s, extra)
# run repr(result) and extra inside try/except as they can raise exceptions
try:
async with async_timeout.timeout(timeout_s):
result = await function.coroutine(ctx, *args, **kwargs)
except Exception as e:
exc_extra = getattr(e, 'extra', None)
if callable(exc_extra):
exc_extra = exc_extra()
raise
else:
result_str = '' if result is None else truncate(repr(result))
except Exception as e:
finished_ms = timestamp_ms()
t = (finished_ms - start_ms) / 1000
if isinstance(e, Retry):
incr_score = e.defer_score
logger.info('%6.2fs ↻ %s retrying job in %0.2fs', t, ref, (e.defer_score or 0) / 1000)
if e.defer_score:
incr_score = e.defer_score + (timestamp_ms() - score)
self.jobs_retried += 1
elif isinstance(e, asyncio.CancelledError):
logger.info('%6.2fs ↻ %s cancelled, will be run again', t, ref)
self.jobs_retried += 1
else:
logger.exception(
'%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
)
result = e
finish = True
self.jobs_failed += 1
else:
success = True
finished_ms = timestamp_ms()
logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
finish = True
self.jobs_complete += 1
result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
result_data = pickle_result(
function_name, args, kwargs, job_try, enqueue_time_ms, success, result, start_ms, finished_ms, ref
)
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
async def finish_job(self, job_id, finish, result_data, result_timeout_s, incr_score):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
delete_keys = [in_progress_key_prefix + job_id]
if finish:
if result_data:
tr.setex(result_key_prefix + job_id, result_timeout_s, result_data)
delete_keys += [retry_key_prefix + job_id, job_key_prefix + job_id]
tr.zrem(queue_name, job_id)
elif incr_score:
tr.zincrby(queue_name, incr_score, job_id)
tr.delete(*delete_keys)
await tr.execute()
self.sem.release()
async def abort_job(self, job_id):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
tr.delete(retry_key_prefix + job_id, in_progress_key_prefix + job_id, job_key_prefix + job_id)
tr.zrem(queue_name, job_id)
await tr.execute()
async def heart_beat(self):
await self.record_health()
await self.run_cron()
async def run_cron(self):
n = datetime.now()
job_futures = set()
for cron_job in self.cron_jobs:
if cron_job.next_run is None:
if cron_job.run_at_startup:
cron_job.next_run = n
else:
cron_job.set_next(n)
if n >= cron_job.next_run:
job_id = f'{cron_job.name}:{to_unix_ms(cron_job.next_run)}' if cron_job.unique else None
job_futures.add(self.pool.enqueue_job(cron_job.name, _job_id=job_id))
cron_job.set_next(n)
job_futures and await asyncio.gather(*job_futures)
async def record_health(self):
now_ts = time()
if (now_ts - self._last_health_check) < self.health_check_interval:
return
self._last_health_check = now_ts
pending_tasks = sum(not t.done() for t in self.tasks)
queued = await self.pool.zcard(queue_name)
info = (
f'{datetime.now():%b-%d %H:%M:%S} j_complete={self.jobs_complete} j_failed={self.jobs_failed} '
f'j_retried={self.jobs_retried} j_ongoing={pending_tasks} queued={queued}'
)
await self.pool.setex(health_check_key, self.health_check_interval + 1, info.encode())
log_suffix = info[info.index('j_complete=') :]
if self._last_health_check_log and log_suffix != self._last_health_check_log:
logger.info('recording health: %s', info)
self._last_health_check_log = log_suffix
elif not self._last_health_check_log:
self._last_health_check_log = log_suffix
def _add_signal_handler(self, signal, handler):
self.loop.add_signal_handler(signal, partial(handler, signal))
def handle_sig(self, signum):
sig = Signals(signum)
logger.info(
'shutdown on %s ◆ %d jobs complete ◆ %d failed ◆ %d retries ◆ %d ongoing to cancel',
sig.name,
self.jobs_complete,
self.jobs_failed,
self.jobs_retried,
len(self.tasks),
)
for t in self.tasks:
if not t.done():
t.cancel()
self.main_task and self.main_task.cancel()
self.on_stop and self.on_stop(sig)
async def close(self):
if not self.pool:
return
await asyncio.gather(*self.tasks)
await self.pool.delete(health_check_key)
if self.on_shutdown:
await self.on_shutdown(self.ctx)
self.pool.close()
await self.pool.wait_closed()
self.pool = None
def __repr__(self):
return (
f'<Worker j_complete={self.jobs_complete} j_failed={self.jobs_failed} j_retried={self.jobs_retried} '
f'j_ongoing={sum(not t.done() for t in self.tasks)}>'
)
|
samuelcolvin/arq | arq/worker.py | Worker.run_check | python | async def run_check(self) -> int:
await self.async_run()
if self.jobs_failed:
failed_job_results = [r for r in await self.pool.all_job_results() if not r.success]
raise FailedJobs(self.jobs_failed, failed_job_results)
else:
return self.jobs_complete | Run :func:`arq.worker.Worker.async_run`, check for failed jobs and raise :class:`arq.worker.FailedJobs`
if any jobs have failed.
:return: number of completed jobs | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/worker.py#L213-L225 | [
"async def async_run(self) -> None:\n \"\"\"\n Asynchronously run the worker, does not close connections. Useful when testing.\n \"\"\"\n self.main_task = self.loop.create_task(self.main())\n await self.main_task\n"
] | class Worker:
"""
Main class for running jobs.
:param functions: list of functions to register, can either be raw coroutine functions or the
result of :func:`arq.worker.func`.
:param cron_jobs: list of cron jobs to run, use :func:`arq.cron.cron` to create them
:param redis_settings: settings for creating a redis connection
:param redis_pool: existing redis pool, generally None
:param burst: whether to stop the worker once all jobs have been run
:param on_startup: coroutine function to run at startup
:param on_shutdown: coroutine function to run at shutdown
:param max_jobs: maximum number of jobs to run at a time
:param job_timeout: default job timeout (max run time)
:param keep_result: default duration to keep job results for
:param poll_delay: duration between polling the queue for new jobs
:param max_tries: default maximum number of times to retry a job
:param health_check_interval: how often to set the health check key
"""
def __init__(
self,
functions: Sequence[Function] = (),
*,
cron_jobs: Optional[Sequence[CronJob]] = None,
redis_settings: RedisSettings = None,
redis_pool: ArqRedis = None,
burst: bool = False,
on_startup: Callable[[Dict], Awaitable] = None,
on_shutdown: Callable[[Dict], Awaitable] = None,
max_jobs: int = 10,
job_timeout: SecondsTimedelta = 300,
keep_result: SecondsTimedelta = 3600,
poll_delay: SecondsTimedelta = 0.5,
max_tries: int = 5,
health_check_interval: SecondsTimedelta = 3600,
ctx: Optional[Dict] = None,
):
self.functions: Dict[str, Union[Function, CronJob]] = {f.name: f for f in map(func, functions)}
self.cron_jobs: List[CronJob] = []
if cron_jobs:
assert all(isinstance(cj, CronJob) for cj in cron_jobs), 'cron_jobs, must be instances of CronJob'
self.cron_jobs = cron_jobs
self.functions.update({cj.name: cj for cj in self.cron_jobs})
assert len(self.functions) > 0, 'at least one function or cron_job must be registered'
self.burst = burst
self.on_startup = on_startup
self.on_shutdown = on_shutdown
self.sem = asyncio.BoundedSemaphore(max_jobs)
self.job_timeout_s = to_seconds(job_timeout)
self.keep_result_s = to_seconds(keep_result)
self.poll_delay_s = to_seconds(poll_delay)
self.max_tries = max_tries
self.health_check_interval = to_seconds(health_check_interval)
self.pool = redis_pool
if self.pool is None:
self.redis_settings = redis_settings or RedisSettings()
else:
self.redis_settings = None
self.tasks = []
self.main_task = None
self.loop = asyncio.get_event_loop()
self.ctx = ctx or {}
max_timeout = max(f.timeout_s or self.job_timeout_s for f in self.functions.values())
self.in_progress_timeout_s = max_timeout + 10
self.jobs_complete = 0
self.jobs_retried = 0
self.jobs_failed = 0
self._last_health_check = 0
self._last_health_check_log = None
self._add_signal_handler(signal.SIGINT, self.handle_sig)
self._add_signal_handler(signal.SIGTERM, self.handle_sig)
self.on_stop = None
def run(self) -> None:
"""
Sync function to run the worker, finally closes worker connections.
"""
self.main_task = self.loop.create_task(self.main())
try:
self.loop.run_until_complete(self.main_task)
except asyncio.CancelledError:
# happens on shutdown, fine
pass
finally:
self.loop.run_until_complete(self.close())
async def async_run(self) -> None:
"""
Asynchronously run the worker, does not close connections. Useful when testing.
"""
self.main_task = self.loop.create_task(self.main())
await self.main_task
async def main(self):
if self.pool is None:
self.pool = await create_pool(self.redis_settings)
logger.info('Starting worker for %d functions: %s', len(self.functions), ', '.join(self.functions))
await log_redis_info(self.pool, logger.info)
self.ctx['redis'] = self.pool
if self.on_startup:
await self.on_startup(self.ctx)
async for _ in poll(self.poll_delay_s): # noqa F841
async with self.sem: # don't both with zrangebyscore until we have "space" to run the jobs
now = timestamp_ms()
job_ids = await self.pool.zrangebyscore(queue_name, max=now)
await self.run_jobs(job_ids)
# required to make sure errors in run_job get propagated
for t in self.tasks:
if t.done():
self.tasks.remove(t)
t.result()
await self.heart_beat()
if self.burst:
queued_jobs = await self.pool.zcard(queue_name)
if queued_jobs == 0:
return
async def run_jobs(self, job_ids):
for job_id in job_ids:
await self.sem.acquire()
in_progress_key = in_progress_key_prefix + job_id
with await self.pool as conn:
_, _, ongoing_exists, score = await asyncio.gather(
conn.unwatch(),
conn.watch(in_progress_key),
conn.exists(in_progress_key),
conn.zscore(queue_name, job_id),
)
if ongoing_exists or not score:
# job already started elsewhere, or already finished and removed from queue
self.sem.release()
continue
tr = conn.multi_exec()
tr.setex(in_progress_key, self.in_progress_timeout_s, b'1')
try:
await tr.execute()
except MultiExecError:
# job already started elsewhere since we got 'existing'
self.sem.release()
else:
self.tasks.append(self.loop.create_task(self.run_job(job_id, score)))
async def run_job(self, job_id, score): # noqa: C901
v, job_try, _ = await asyncio.gather(
self.pool.get(job_key_prefix + job_id, encoding=None),
self.pool.incr(retry_key_prefix + job_id),
self.pool.expire(retry_key_prefix + job_id, 88400),
)
if not v:
logger.warning('job %s expired', job_id)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
function_name, args, kwargs, enqueue_job_try, enqueue_time_ms = unpickle_job_raw(v)
try:
function: Union[Function, CronJob] = self.functions[function_name]
except KeyError:
logger.warning('job %s, function %r not found', job_id, function_name)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
if hasattr(function, 'next_run'):
# cron_job
ref = function_name
else:
ref = f'{job_id}:{function_name}'
if enqueue_job_try and enqueue_job_try > job_try:
job_try = enqueue_job_try
await self.pool.setex(retry_key_prefix + job_id, 88400, str(job_try))
max_tries = self.max_tries if function.max_tries is None else function.max_tries
if job_try > max_tries:
t = (timestamp_ms() - enqueue_time_ms) / 1000
logger.warning('%6.2fs ! %s max retries %d exceeded', t, ref, max_tries)
self.jobs_failed += 1
return await asyncio.shield(self.abort_job(job_id))
result = no_result
exc_extra = None
finish = False
timeout_s = self.job_timeout_s if function.timeout_s is None else function.timeout_s
incr_score = None
job_ctx = {
'job_id': job_id,
'job_try': job_try,
'enqueue_time': ms_to_datetime(enqueue_time_ms),
'score': score,
}
ctx = {**self.ctx, **job_ctx}
start_ms = timestamp_ms()
success = False
try:
s = args_to_string(args, kwargs)
extra = f' try={job_try}' if job_try > 1 else ''
if (start_ms - score) > 1200:
extra += f' delayed={(start_ms - score) / 1000:0.2f}s'
logger.info('%6.2fs → %s(%s)%s', (start_ms - enqueue_time_ms) / 1000, ref, s, extra)
# run repr(result) and extra inside try/except as they can raise exceptions
try:
async with async_timeout.timeout(timeout_s):
result = await function.coroutine(ctx, *args, **kwargs)
except Exception as e:
exc_extra = getattr(e, 'extra', None)
if callable(exc_extra):
exc_extra = exc_extra()
raise
else:
result_str = '' if result is None else truncate(repr(result))
except Exception as e:
finished_ms = timestamp_ms()
t = (finished_ms - start_ms) / 1000
if isinstance(e, Retry):
incr_score = e.defer_score
logger.info('%6.2fs ↻ %s retrying job in %0.2fs', t, ref, (e.defer_score or 0) / 1000)
if e.defer_score:
incr_score = e.defer_score + (timestamp_ms() - score)
self.jobs_retried += 1
elif isinstance(e, asyncio.CancelledError):
logger.info('%6.2fs ↻ %s cancelled, will be run again', t, ref)
self.jobs_retried += 1
else:
logger.exception(
'%6.2fs ! %s failed, %s: %s', t, ref, e.__class__.__name__, e, extra={'extra': exc_extra}
)
result = e
finish = True
self.jobs_failed += 1
else:
success = True
finished_ms = timestamp_ms()
logger.info('%6.2fs ← %s ● %s', (finished_ms - start_ms) / 1000, ref, result_str)
finish = True
self.jobs_complete += 1
result_timeout_s = self.keep_result_s if function.keep_result_s is None else function.keep_result_s
result_data = None
if result is not no_result and result_timeout_s > 0:
result_data = pickle_result(
function_name, args, kwargs, job_try, enqueue_time_ms, success, result, start_ms, finished_ms, ref
)
await asyncio.shield(self.finish_job(job_id, finish, result_data, result_timeout_s, incr_score))
async def finish_job(self, job_id, finish, result_data, result_timeout_s, incr_score):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
delete_keys = [in_progress_key_prefix + job_id]
if finish:
if result_data:
tr.setex(result_key_prefix + job_id, result_timeout_s, result_data)
delete_keys += [retry_key_prefix + job_id, job_key_prefix + job_id]
tr.zrem(queue_name, job_id)
elif incr_score:
tr.zincrby(queue_name, incr_score, job_id)
tr.delete(*delete_keys)
await tr.execute()
self.sem.release()
async def abort_job(self, job_id):
with await self.pool as conn:
await conn.unwatch()
tr = conn.multi_exec()
tr.delete(retry_key_prefix + job_id, in_progress_key_prefix + job_id, job_key_prefix + job_id)
tr.zrem(queue_name, job_id)
await tr.execute()
async def heart_beat(self):
await self.record_health()
await self.run_cron()
async def run_cron(self):
n = datetime.now()
job_futures = set()
for cron_job in self.cron_jobs:
if cron_job.next_run is None:
if cron_job.run_at_startup:
cron_job.next_run = n
else:
cron_job.set_next(n)
if n >= cron_job.next_run:
job_id = f'{cron_job.name}:{to_unix_ms(cron_job.next_run)}' if cron_job.unique else None
job_futures.add(self.pool.enqueue_job(cron_job.name, _job_id=job_id))
cron_job.set_next(n)
job_futures and await asyncio.gather(*job_futures)
async def record_health(self):
now_ts = time()
if (now_ts - self._last_health_check) < self.health_check_interval:
return
self._last_health_check = now_ts
pending_tasks = sum(not t.done() for t in self.tasks)
queued = await self.pool.zcard(queue_name)
info = (
f'{datetime.now():%b-%d %H:%M:%S} j_complete={self.jobs_complete} j_failed={self.jobs_failed} '
f'j_retried={self.jobs_retried} j_ongoing={pending_tasks} queued={queued}'
)
await self.pool.setex(health_check_key, self.health_check_interval + 1, info.encode())
log_suffix = info[info.index('j_complete=') :]
if self._last_health_check_log and log_suffix != self._last_health_check_log:
logger.info('recording health: %s', info)
self._last_health_check_log = log_suffix
elif not self._last_health_check_log:
self._last_health_check_log = log_suffix
def _add_signal_handler(self, signal, handler):
self.loop.add_signal_handler(signal, partial(handler, signal))
def handle_sig(self, signum):
sig = Signals(signum)
logger.info(
'shutdown on %s ◆ %d jobs complete ◆ %d failed ◆ %d retries ◆ %d ongoing to cancel',
sig.name,
self.jobs_complete,
self.jobs_failed,
self.jobs_retried,
len(self.tasks),
)
for t in self.tasks:
if not t.done():
t.cancel()
self.main_task and self.main_task.cancel()
self.on_stop and self.on_stop(sig)
async def close(self):
if not self.pool:
return
await asyncio.gather(*self.tasks)
await self.pool.delete(health_check_key)
if self.on_shutdown:
await self.on_shutdown(self.ctx)
self.pool.close()
await self.pool.wait_closed()
self.pool = None
def __repr__(self):
return (
f'<Worker j_complete={self.jobs_complete} j_failed={self.jobs_failed} j_retried={self.jobs_retried} '
f'j_ongoing={sum(not t.done() for t in self.tasks)}>'
)
|
samuelcolvin/arq | docs/examples/job_results.py | main | python | async def main():
redis = await create_pool(RedisSettings())
job = await redis.enqueue_job('the_task')
# get the job's id
print(job.job_id)
# get information about the job, will include results if the job has finished, but
# doesn't await the job's result
debug(await job.info())
"""
> docs/examples/job_results.py:23 main
JobDef(
function='the_task',
args=(),
kwargs={},
job_try=None,
enqueue_time=datetime.datetime(2019, 4, 23, 13, 58, 56, 781000),
score=1556027936781
) (JobDef)
"""
# get the Job's status
print(await job.status())
"""
> JobStatus.queued
"""
# poll redis for the job result, if the job raised an exception,
# it will be raised here
# (You'll need the worker running at the same time to get a result here)
print(await job.result(timeout=5))
"""
> 42
""" | > 68362958a244465b9be909db4b7b5ab4 (or whatever) | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/docs/examples/job_results.py#L12-L50 | [
"async def create_pool(settings: RedisSettings = None, *, _retry: int = 0) -> ArqRedis:\n \"\"\"\n Create a new redis pool, retrying up to ``conn_retries`` times if the connection fails.\n\n Similar to ``aioredis.create_redis_pool`` except it returns a :class:`arq.connections.ArqRedis` instance,\n thus allowing job enqueuing.\n \"\"\"\n settings = settings or RedisSettings()\n addr = settings.host, settings.port\n try:\n pool = await aioredis.create_redis_pool(\n addr,\n db=settings.database,\n password=settings.password,\n timeout=settings.conn_timeout,\n encoding='utf8',\n commands_factory=ArqRedis,\n )\n except (ConnectionError, OSError, aioredis.RedisError, asyncio.TimeoutError) as e:\n if _retry < settings.conn_retries:\n logger.warning(\n 'redis connection error %s:%s %s %s, %d retries remaining...',\n settings.host,\n settings.port,\n e.__class__.__name__,\n e,\n settings.conn_retries - _retry,\n )\n await asyncio.sleep(settings.conn_retry_delay)\n else:\n raise\n else:\n if _retry > 0:\n logger.info('redis connection successful')\n return pool\n\n # recursively attempt to create the pool outside the except block to avoid\n # \"During handling of the above exception...\" madness\n return await create_pool(settings, _retry=_retry + 1)\n"
] | import asyncio
from arq import create_pool
from arq.connections import RedisSettings
# requires `pip install devtools`, used for pretty printing of job info
from devtools import debug
async def the_task(ctx):
print('running the task')
return 42
class WorkerSettings:
functions = [the_task]
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
|
samuelcolvin/arq | arq/jobs.py | Job.result | python | async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError() | Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L62-L79 | [
"async def poll(step: float = 0.5):\n loop = asyncio.get_event_loop()\n start = loop.time()\n while True:\n before = loop.time()\n yield before - start\n after = loop.time()\n wait = max([0, step - after + before])\n await asyncio.sleep(wait)\n",
"async def result_info(self) -> Optional[JobResult]:\n \"\"\"\n Information about the job result if available, does not wait for the result. Does not raise an exception\n even if the job raised one.\n \"\"\"\n v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)\n if v:\n return unpickle_result(v)\n"
class Job:
    """
    Holds a reference to an enqueued job, identified by ``job_id``, and
    exposes coroutines to query its status and result via redis.
    """
    __slots__ = 'job_id', '_redis'
    def __init__(self, job_id: str, redis):
        # redis: connection/pool used for all lookups below.
        self.job_id = job_id
        self._redis = redis
    async def info(self) -> Optional[JobDef]:
        """
        All information on a job, including its result if it's available, does not wait for the result.
        """
        # Prefer the result record; fall back to the still-queued job data.
        info = await self.result_info()
        if not info:
            v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
            if v:
                info = unpickle_job(v)
        if info:
            # The job's sorted-set score is its scheduled run time in ms.
            info.score = await self._redis.zscore(queue_name, self.job_id)
        return info
    async def result_info(self) -> Optional[JobResult]:
        """
        Information about the job result if available, does not wait for the result. Does not raise an exception
        even if the job raised one.

        Returns ``None`` (implicitly) when no result record exists yet.
        """
        v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
        if v:
            return unpickle_result(v)
    async def status(self) -> JobStatus:
        """
        Status of the job: complete, in_progress, deferred, queued or not_found.
        """
        if await self._redis.exists(result_key_prefix + self.job_id):
            return JobStatus.complete
        elif await self._redis.exists(in_progress_key_prefix + self.job_id):
            return JobStatus.in_progress
        else:
            score = await self._redis.zscore(queue_name, self.job_id)
            if not score:
                return JobStatus.not_found
            # A score in the future means the job was deliberately deferred.
            return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
    def __repr__(self):
        return f'<arq job {self.job_id}>'
|
samuelcolvin/arq | arq/jobs.py | Job.info | python | async def info(self) -> Optional[JobDef]:
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info | All information on a job, including its result if it's available, does not wait for the result. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L81-L92 | [
"async def result_info(self) -> Optional[JobResult]:\n \"\"\"\n Information about the job result if available, does not wait for the result. Does not raise an exception\n even if the job raised one.\n \"\"\"\n v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)\n if v:\n return unpickle_result(v)\n"
] | class Job:
"""
Holds data a reference to a job.
"""
__slots__ = 'job_id', '_redis'
def __init__(self, job_id: str, redis):
self.job_id = job_id
self._redis = redis
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
"""
Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result
"""
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v)
async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
def __repr__(self):
return f'<arq job {self.job_id}>'
|
samuelcolvin/arq | arq/jobs.py | Job.result_info | python | async def result_info(self) -> Optional[JobResult]:
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v) | Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L94-L101 | null | class Job:
"""
Holds data a reference to a job.
"""
__slots__ = 'job_id', '_redis'
def __init__(self, job_id: str, redis):
self.job_id = job_id
self._redis = redis
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
"""
Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result
"""
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info
async def status(self) -> JobStatus:
"""
Status of the job.
"""
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued
def __repr__(self):
return f'<arq job {self.job_id}>'
|
samuelcolvin/arq | arq/jobs.py | Job.status | python | async def status(self) -> JobStatus:
if await self._redis.exists(result_key_prefix + self.job_id):
return JobStatus.complete
elif await self._redis.exists(in_progress_key_prefix + self.job_id):
return JobStatus.in_progress
else:
score = await self._redis.zscore(queue_name, self.job_id)
if not score:
return JobStatus.not_found
return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued | Status of the job. | train | https://github.com/samuelcolvin/arq/blob/1434646b48c45bd27e392f0162976404e4d8021d/arq/jobs.py#L103-L115 | null | class Job:
"""
Holds data a reference to a job.
"""
__slots__ = 'job_id', '_redis'
def __init__(self, job_id: str, redis):
self.job_id = job_id
self._redis = redis
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
"""
Get the result of the job, including waiting if it's not yet available. If the job raised an exception,
it will be raised here.
:param timeout: maximum time to wait for the job result before raising ``TimeoutError``, will wait forever
:param pole_delay: how often to poll redis for the job result
"""
async for delay in poll(pole_delay):
info = await self.result_info()
if info:
result = info.result
if info.success:
return result
else:
raise result
if timeout is not None and delay > timeout:
raise asyncio.TimeoutError()
async def info(self) -> Optional[JobDef]:
"""
All information on a job, including its result if it's available, does not wait for the result.
"""
info = await self.result_info()
if not info:
v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
if v:
info = unpickle_job(v)
if info:
info.score = await self._redis.zscore(queue_name, self.job_id)
return info
async def result_info(self) -> Optional[JobResult]:
"""
Information about the job result if available, does not wait for the result. Does not raise an exception
even if the job raised one.
"""
v = await self._redis.get(result_key_prefix + self.job_id, encoding=None)
if v:
return unpickle_result(v)
def __repr__(self):
return f'<arq job {self.job_id}>'
|
iancmcc/ouimeaux | ouimeaux/pysignals/inspect.py | get_func_full_args | python | def get_func_full_args(func):
if six.PY2:
argspec = inspect.getargspec(func)
args = argspec.args[1:] # ignore 'self'
defaults = argspec.defaults or []
# Split args into two lists depending on whether they have default value
no_default = args[:len(args) - len(defaults)]
with_default = args[len(args) - len(defaults):]
# Join the two lists and combine it with default values
args = [(arg,) for arg in no_default] + zip(with_default, defaults)
# Add possible *args and **kwargs and prepend them with '*' or '**'
varargs = [('*' + argspec.varargs,)] if argspec.varargs else []
kwargs = [('**' + argspec.keywords,)] if argspec.keywords else []
return args + varargs + kwargs
sig = inspect.signature(func)
args = []
for arg_name, param in sig.parameters.items():
name = arg_name
# Ignore 'self'
if name == 'self':
continue
if param.kind == inspect.Parameter.VAR_POSITIONAL:
name = '*' + name
elif param.kind == inspect.Parameter.VAR_KEYWORD:
name = '**' + name
if param.default != inspect.Parameter.empty:
args.append((name, param.default))
else:
args.append((name,))
return args | Return a list of (argument name, default value) tuples. If the argument
does not have a default value, omit it in the tuple. Arguments such as
*args and **kwargs are also included. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/inspect.py#L46-L81 | null | from __future__ import absolute_import
import inspect
import six
def getargspec(func):
    """Drop-in replacement for the legacy ``inspect.getargspec``.

    On Python 2 this delegates to ``inspect.getargspec``; on Python 3 the
    same ``(args, varargs, varkw, defaults)`` shape is rebuilt from
    ``inspect.signature``.  Keyword-only parameters are ignored, mirroring
    the historical API.
    """
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        return inspect.getargspec(func)
    sig = inspect.signature(func)
    params = sig.parameters.values()
    # Plain positional-or-keyword argument names, in declaration order.
    args = [
        p.name for p in params
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
    # *args / **kwargs names, or None when absent.
    varargs = next(
        (p.name for p in params if p.kind == inspect.Parameter.VAR_POSITIONAL), None
    )
    varkw = next(
        (p.name for p in params if p.kind == inspect.Parameter.VAR_KEYWORD), None
    )
    # Defaults for the trailing positional-or-keyword args; the classic API
    # used None rather than an empty sequence.
    defaults = [
        p.default for p in params
        if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
    ] or None
    return args, varargs, varkw, defaults
def get_func_args(func):
    """Return the positional-or-keyword parameter names of *func*.

    On Python 2 a leading ``self`` is stripped; on Python 3 no name is
    dropped (callers are expected to pass bound methods, whose signatures
    already omit ``self``).
    """
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        argspec = inspect.getargspec(func)
        return argspec.args[1:]  # ignore 'self'
    return [
        arg_name for arg_name, param in inspect.signature(func).parameters.items()
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
def func_accepts_kwargs(func):
    """Return True if *func* accepts arbitrary keyword arguments (``**kwargs``)."""
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        # Not all callables are inspectable with getargspec, so try a couple
        # of ways and, failing that, be permissive: assume **kwargs are
        # accepted rather than block registration of valid but weird
        # callables.
        try:
            argspec = inspect.getargspec(func)
        except TypeError:
            try:
                argspec = inspect.getargspec(func.__call__)
            except (TypeError, AttributeError):
                argspec = None
        return not argspec or argspec[2] is not None
    return any(
        p for p in inspect.signature(func).parameters.values()
        if p.kind == p.VAR_KEYWORD
    )
def func_accepts_var_args(func):
    """
    Return True if function 'func' accepts positional arguments *args.
    """
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        return inspect.getargspec(func)[1] is not None
    return any(
        p.kind == p.VAR_POSITIONAL
        for p in inspect.signature(func).parameters.values()
    )
def func_has_no_args(func):
    """Return True if *func* takes no arguments beyond ``self``.

    "No args" here means exactly one positional-or-keyword parameter —
    the implicit ``self`` of an unbound method.
    """
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        args = inspect.getargspec(func)[0]
    else:
        args = [
            p for p in inspect.signature(func).parameters.values()
            if p.kind == p.POSITIONAL_OR_KEYWORD
        ]
    return len(args) == 1
def func_supports_parameter(func, parameter):
    """Return True if *func* has a parameter named *parameter*."""
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] >= 3:
        return parameter in inspect.signature(func).parameters
    else:
        args, varargs, varkw, defaults = inspect.getargspec(func)
        return parameter in args
|
iancmcc/ouimeaux | ouimeaux/pysignals/inspect.py | func_accepts_var_args | python | def func_accepts_var_args(func):
if six.PY2:
return inspect.getargspec(func)[1] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_POSITIONAL
) | Return True if function 'func' accepts positional arguments *args. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/inspect.py#L105-L115 | null | from __future__ import absolute_import
import inspect
import six
def getargspec(func):
if six.PY2:
return inspect.getargspec(func)
sig = inspect.signature(func)
args = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
varargs = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_POSITIONAL
]
varargs = varargs[0] if varargs else None
varkw = [
p.name for p in sig.parameters.values()
if p.kind == inspect.Parameter.VAR_KEYWORD
]
varkw = varkw[0] if varkw else None
defaults = [
p.default for p in sig.parameters.values()
if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty
] or None
return args, varargs, varkw, defaults
def get_func_args(func):
if six.PY2:
argspec = inspect.getargspec(func)
return argspec.args[1:] # ignore 'self'
sig = inspect.signature(func)
return [
arg_name for arg_name, param in sig.parameters.items()
if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def get_func_full_args(func):
    """
    Return a list of (argument name, default value) tuples. If the argument
    does not have a default value, omit it in the tuple. Arguments such as
    *args and **kwargs are also included (prefixed with '*' / '**').
    """
    import sys  # stdlib version check instead of depending on six here
    if sys.version_info[0] == 2:
        argspec = inspect.getargspec(func)
        args = argspec.args[1:]  # ignore 'self'
        defaults = argspec.defaults or []
        # Split args into two lists depending on whether they have default value
        no_default = args[:len(args) - len(defaults)]
        with_default = args[len(args) - len(defaults):]
        # Join the two lists and combine it with default values
        args = [(arg,) for arg in no_default] + zip(with_default, defaults)
        # Add possible *args and **kwargs and prepend them with '*' or '**'
        varargs = [('*' + argspec.varargs,)] if argspec.varargs else []
        kwargs = [('**' + argspec.keywords,)] if argspec.keywords else []
        return args + varargs + kwargs
    result = []
    for arg_name, param in inspect.signature(func).parameters.items():
        # Ignore 'self'
        if arg_name == 'self':
            continue
        name = arg_name
        if param.kind == inspect.Parameter.VAR_POSITIONAL:
            name = '*' + name
        elif param.kind == inspect.Parameter.VAR_KEYWORD:
            name = '**' + name
        # Compare against the sentinel with `is not`, never `==`: a default
        # value with an overloaded __eq__ (e.g. a numpy array) would
        # otherwise mis-compare or raise.
        if param.default is not inspect.Parameter.empty:
            result.append((name, param.default))
        else:
            result.append((name,))
    return result
def func_accepts_kwargs(func):
if six.PY2:
# Not all callables are inspectable with getargspec, so we'll
# try a couple different ways but in the end fall back on assuming
# it is -- we don't want to prevent registration of valid but weird
# callables.
try:
argspec = inspect.getargspec(func)
except TypeError:
try:
argspec = inspect.getargspec(func.__call__)
except (TypeError, AttributeError):
argspec = None
return not argspec or argspec[2] is not None
return any(
p for p in inspect.signature(func).parameters.values()
if p.kind == p.VAR_KEYWORD
)
def func_has_no_args(func):
args = inspect.getargspec(func)[0] if six.PY2 else [
p for p in inspect.signature(func).parameters.values()
if p.kind == p.POSITIONAL_OR_KEYWORD
]
return len(args) == 1
def func_supports_parameter(func, parameter):
if six.PY3:
return parameter in inspect.signature(func).parameters
else:
args, varargs, varkw, defaults = inspect.getargspec(func)
return parameter in args
|
iancmcc/ouimeaux | ouimeaux/environment.py | Environment.start | python | def start(self):
if self._with_discovery:
# Start the server to listen to new devices
self.upnp.server.set_spawn(2)
self.upnp.server.start()
if self._with_subscribers:
# Start the server to listen to events
self.registry.server.set_spawn(2)
self.registry.server.start() | Start the server(s) necessary to receive information from devices. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/environment.py#L75-L86 | null | class Environment(object):
def __init__(self, switch_callback=_NOOP, motion_callback=_NOOP, bridge_callback=_NOOP,
maker_callback=_NOOP, with_discovery=True, with_subscribers=True, with_cache=_MARKER,
bind=None, config_filename=None):
"""
Create a WeMo environment.
@param switch_callback: A function to be called when a new switch is
discovered.
@type switch_callback: function
@param motion_callback: A function to be called when a new motion is
discovered.
@type motion_callback: function
@param with_subscribers: Whether to register for events with discovered
devices.
@type with_subscribers: bool
@param bind: ip:port to which to bind the response server.
@type bind: str
"""
if with_cache is not _MARKER:
log.warn("with_cache argument is deprecated (and nonfunctional)")
self._config = WemoConfiguration(filename=config_filename)
self.upnp = UPnP(bind=bind or self._config.bind)
discovered.connect(self._found_device, self.upnp)
self.registry = SubscriptionRegistry()
self._with_discovery = with_discovery
self._with_subscribers = with_subscribers
self._switch_callback = switch_callback
self._motion_callback = motion_callback
self._bridge_callback = bridge_callback
self._maker_callback = maker_callback
self._switches = {}
self._motions = {}
self._bridges = {}
self._makers = {}
self.devices = {}
def __iter__(self):
return self.devices.itervalues()
def wait(self, timeout=None):
"""
Wait for events.
"""
try:
if timeout:
gevent.sleep(timeout)
else:
while True:
gevent.sleep(1000)
except (KeyboardInterrupt, SystemExit, Exception):
pass
def discover(self, seconds=2):
"""
Discover devices in the environment.
@param seconds: Number of seconds to broadcast requests.
@type seconds: int
"""
log.info("Discovering devices")
with gevent.Timeout(seconds, StopBroadcasting) as timeout:
try:
try:
while True:
self.upnp.broadcast()
gevent.sleep(1)
except Exception as e:
raise StopBroadcasting(e)
except StopBroadcasting:
return
def _found_device(self, sender, **kwargs):
address = kwargs['address']
headers = kwargs['headers']
usn = headers['usn']
if usn.startswith('uuid:Socket'):
klass = Switch
elif usn.startswith('uuid:Lightswitch'):
klass = LightSwitch
elif usn.startswith('uuid:Insight'):
klass = Insight
elif usn.startswith('uuid:Sensor'):
klass = Motion
elif usn.startswith('uuid:Bridge'):
klass = Bridge
elif usn.startswith('uuid:Maker'):
klass = Maker
else:
log.info("Unrecognized device type. USN={0}".format(usn))
return
device = klass(headers['location'])
log.info("Found device %r at %s" % (device, address))
self._process_device(device)
def _process_device(self, device):
if isinstance(device, Switch):
callback = self._switch_callback
registry = self._switches
elif isinstance(device, Motion):
callback = self._motion_callback
registry = self._motions
elif isinstance(device, Bridge):
callback = self._bridge_callback
registry = self._bridges
for light in device.Lights:
log.info("Found light \"%s\" connected to \"%s\"" % (light, device.name))
for group in device.Groups:
log.info("Found group \"%s\" connected to \"%s\"" % (group, device.name))
elif isinstance(device, Maker):
callback = self._maker_callback
registry = self._makers
else:
return
self.devices[device.name] = device
registry[device.name] = device
if self._with_subscribers:
self.registry.register(device)
self.registry.on(device, 'BinaryState',
device._update_state)
try:
if isinstance(device, Bridge):
pass
else:
device.ping()
except DeviceUnreachable:
return
devicefound.send(device)
callback(device)
def list_switches(self):
"""
List switches discovered in the environment.
"""
return self._switches.keys()
def list_motions(self):
"""
List motions discovered in the environment.
"""
return self._motions.keys()
def list_makers(self):
"""
List makers discovered in the environment.
"""
return self._makers.keys()
def list_bridges(self):
"""
List bridges discovered in the environment.
"""
return self._bridges.keys()
def get(self, name):
alias = self._config.aliases.get(name)
if alias:
matches = lambda x: x == alias
elif name:
matches = matcher(name)
else:
matches = _NOOP
for k in self.devices:
if matches(k):
return self.devices[k]
else:
raise UnknownDevice(name)
def get_switch(self, name):
"""
Get a switch by name.
"""
try:
return self._switches[name]
except KeyError:
raise UnknownDevice(name)
def get_motion(self, name):
"""
Get a motion by name.
"""
try:
return self._motions[name]
except KeyError:
raise UnknownDevice(name)
def get_bridge(self, name):
"""
Get a bridge by name.
"""
try:
return self._bridges[name]
except KeyError:
raise UnknownDevice(name)
def get_maker(self, name):
"""
Get a maker by name.
"""
try:
return self._makers[name]
except KeyError:
raise UnknownDevice(name)
|
iancmcc/ouimeaux | ouimeaux/environment.py | Environment.wait | python | def wait(self, timeout=None):
try:
if timeout:
gevent.sleep(timeout)
else:
while True:
gevent.sleep(1000)
except (KeyboardInterrupt, SystemExit, Exception):
pass | Wait for events. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/environment.py#L88-L99 | null | class Environment(object):
def __init__(self, switch_callback=_NOOP, motion_callback=_NOOP, bridge_callback=_NOOP,
maker_callback=_NOOP, with_discovery=True, with_subscribers=True, with_cache=_MARKER,
bind=None, config_filename=None):
"""
Create a WeMo environment.
@param switch_callback: A function to be called when a new switch is
discovered.
@type switch_callback: function
@param motion_callback: A function to be called when a new motion is
discovered.
@type motion_callback: function
@param with_subscribers: Whether to register for events with discovered
devices.
@type with_subscribers: bool
@param bind: ip:port to which to bind the response server.
@type bind: str
"""
if with_cache is not _MARKER:
log.warn("with_cache argument is deprecated (and nonfunctional)")
self._config = WemoConfiguration(filename=config_filename)
self.upnp = UPnP(bind=bind or self._config.bind)
discovered.connect(self._found_device, self.upnp)
self.registry = SubscriptionRegistry()
self._with_discovery = with_discovery
self._with_subscribers = with_subscribers
self._switch_callback = switch_callback
self._motion_callback = motion_callback
self._bridge_callback = bridge_callback
self._maker_callback = maker_callback
self._switches = {}
self._motions = {}
self._bridges = {}
self._makers = {}
self.devices = {}
def __iter__(self):
return self.devices.itervalues()
def start(self):
"""
Start the server(s) necessary to receive information from devices.
"""
if self._with_discovery:
# Start the server to listen to new devices
self.upnp.server.set_spawn(2)
self.upnp.server.start()
if self._with_subscribers:
# Start the server to listen to events
self.registry.server.set_spawn(2)
self.registry.server.start()
def discover(self, seconds=2):
"""
Discover devices in the environment.
@param seconds: Number of seconds to broadcast requests.
@type seconds: int
"""
log.info("Discovering devices")
with gevent.Timeout(seconds, StopBroadcasting) as timeout:
try:
try:
while True:
self.upnp.broadcast()
gevent.sleep(1)
except Exception as e:
raise StopBroadcasting(e)
except StopBroadcasting:
return
def _found_device(self, sender, **kwargs):
address = kwargs['address']
headers = kwargs['headers']
usn = headers['usn']
if usn.startswith('uuid:Socket'):
klass = Switch
elif usn.startswith('uuid:Lightswitch'):
klass = LightSwitch
elif usn.startswith('uuid:Insight'):
klass = Insight
elif usn.startswith('uuid:Sensor'):
klass = Motion
elif usn.startswith('uuid:Bridge'):
klass = Bridge
elif usn.startswith('uuid:Maker'):
klass = Maker
else:
log.info("Unrecognized device type. USN={0}".format(usn))
return
device = klass(headers['location'])
log.info("Found device %r at %s" % (device, address))
self._process_device(device)
def _process_device(self, device):
if isinstance(device, Switch):
callback = self._switch_callback
registry = self._switches
elif isinstance(device, Motion):
callback = self._motion_callback
registry = self._motions
elif isinstance(device, Bridge):
callback = self._bridge_callback
registry = self._bridges
for light in device.Lights:
log.info("Found light \"%s\" connected to \"%s\"" % (light, device.name))
for group in device.Groups:
log.info("Found group \"%s\" connected to \"%s\"" % (group, device.name))
elif isinstance(device, Maker):
callback = self._maker_callback
registry = self._makers
else:
return
self.devices[device.name] = device
registry[device.name] = device
if self._with_subscribers:
self.registry.register(device)
self.registry.on(device, 'BinaryState',
device._update_state)
try:
if isinstance(device, Bridge):
pass
else:
device.ping()
except DeviceUnreachable:
return
devicefound.send(device)
callback(device)
def list_switches(self):
"""
List switches discovered in the environment.
"""
return self._switches.keys()
def list_motions(self):
"""
List motions discovered in the environment.
"""
return self._motions.keys()
def list_makers(self):
"""
List makers discovered in the environment.
"""
return self._makers.keys()
def list_bridges(self):
"""
List bridges discovered in the environment.
"""
return self._bridges.keys()
def get(self, name):
alias = self._config.aliases.get(name)
if alias:
matches = lambda x: x == alias
elif name:
matches = matcher(name)
else:
matches = _NOOP
for k in self.devices:
if matches(k):
return self.devices[k]
else:
raise UnknownDevice(name)
def get_switch(self, name):
"""
Get a switch by name.
"""
try:
return self._switches[name]
except KeyError:
raise UnknownDevice(name)
def get_motion(self, name):
"""
Get a motion by name.
"""
try:
return self._motions[name]
except KeyError:
raise UnknownDevice(name)
def get_bridge(self, name):
"""
Get a bridge by name.
"""
try:
return self._bridges[name]
except KeyError:
raise UnknownDevice(name)
def get_maker(self, name):
"""
Get a maker by name.
"""
try:
return self._makers[name]
except KeyError:
raise UnknownDevice(name)
|
iancmcc/ouimeaux | ouimeaux/environment.py | Environment.discover | python | def discover(self, seconds=2):
log.info("Discovering devices")
with gevent.Timeout(seconds, StopBroadcasting) as timeout:
try:
try:
while True:
self.upnp.broadcast()
gevent.sleep(1)
except Exception as e:
raise StopBroadcasting(e)
except StopBroadcasting:
return | Discover devices in the environment.
@param seconds: Number of seconds to broadcast requests.
@type seconds: int | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/environment.py#L101-L118 | [
"def broadcast(self):\n \"\"\"\n Send a multicast M-SEARCH request asking for devices to report in.\n \"\"\"\n log.debug(\"Broadcasting M-SEARCH to %s:%s\", self.mcast_ip, self.mcast_port)\n request = '\\r\\n'.join((\"M-SEARCH * HTTP/1.1\",\n \"HOST:{mcast_ip}:{mcast_port}\",\n \"ST:upnp:rootdevice\",\n \"MX:2\",\n 'MAN:\"ssdp:discover\"',\n \"\", \"\")).format(**self.__dict__)\n self.server.sendto(request.encode(), (self.mcast_ip, self.mcast_port))\n"
] | class Environment(object):
def __init__(self, switch_callback=_NOOP, motion_callback=_NOOP, bridge_callback=_NOOP,
maker_callback=_NOOP, with_discovery=True, with_subscribers=True, with_cache=_MARKER,
bind=None, config_filename=None):
"""
Create a WeMo environment.
@param switch_callback: A function to be called when a new switch is
discovered.
@type switch_callback: function
@param motion_callback: A function to be called when a new motion is
discovered.
@type motion_callback: function
@param with_subscribers: Whether to register for events with discovered
devices.
@type with_subscribers: bool
@param bind: ip:port to which to bind the response server.
@type bind: str
"""
if with_cache is not _MARKER:
log.warn("with_cache argument is deprecated (and nonfunctional)")
self._config = WemoConfiguration(filename=config_filename)
self.upnp = UPnP(bind=bind or self._config.bind)
discovered.connect(self._found_device, self.upnp)
self.registry = SubscriptionRegistry()
self._with_discovery = with_discovery
self._with_subscribers = with_subscribers
self._switch_callback = switch_callback
self._motion_callback = motion_callback
self._bridge_callback = bridge_callback
self._maker_callback = maker_callback
self._switches = {}
self._motions = {}
self._bridges = {}
self._makers = {}
self.devices = {}
def __iter__(self):
return self.devices.itervalues()
def start(self):
"""
Start the server(s) necessary to receive information from devices.
"""
if self._with_discovery:
# Start the server to listen to new devices
self.upnp.server.set_spawn(2)
self.upnp.server.start()
if self._with_subscribers:
# Start the server to listen to events
self.registry.server.set_spawn(2)
self.registry.server.start()
def wait(self, timeout=None):
"""
Wait for events.
"""
try:
if timeout:
gevent.sleep(timeout)
else:
while True:
gevent.sleep(1000)
except (KeyboardInterrupt, SystemExit, Exception):
pass
def _found_device(self, sender, **kwargs):
address = kwargs['address']
headers = kwargs['headers']
usn = headers['usn']
if usn.startswith('uuid:Socket'):
klass = Switch
elif usn.startswith('uuid:Lightswitch'):
klass = LightSwitch
elif usn.startswith('uuid:Insight'):
klass = Insight
elif usn.startswith('uuid:Sensor'):
klass = Motion
elif usn.startswith('uuid:Bridge'):
klass = Bridge
elif usn.startswith('uuid:Maker'):
klass = Maker
else:
log.info("Unrecognized device type. USN={0}".format(usn))
return
device = klass(headers['location'])
log.info("Found device %r at %s" % (device, address))
self._process_device(device)
def _process_device(self, device):
if isinstance(device, Switch):
callback = self._switch_callback
registry = self._switches
elif isinstance(device, Motion):
callback = self._motion_callback
registry = self._motions
elif isinstance(device, Bridge):
callback = self._bridge_callback
registry = self._bridges
for light in device.Lights:
log.info("Found light \"%s\" connected to \"%s\"" % (light, device.name))
for group in device.Groups:
log.info("Found group \"%s\" connected to \"%s\"" % (group, device.name))
elif isinstance(device, Maker):
callback = self._maker_callback
registry = self._makers
else:
return
self.devices[device.name] = device
registry[device.name] = device
if self._with_subscribers:
self.registry.register(device)
self.registry.on(device, 'BinaryState',
device._update_state)
try:
if isinstance(device, Bridge):
pass
else:
device.ping()
except DeviceUnreachable:
return
devicefound.send(device)
callback(device)
def list_switches(self):
"""
List switches discovered in the environment.
"""
return self._switches.keys()
def list_motions(self):
"""
List motions discovered in the environment.
"""
return self._motions.keys()
def list_makers(self):
"""
List makers discovered in the environment.
"""
return self._makers.keys()
def list_bridges(self):
"""
List bridges discovered in the environment.
"""
return self._bridges.keys()
def get(self, name):
alias = self._config.aliases.get(name)
if alias:
matches = lambda x: x == alias
elif name:
matches = matcher(name)
else:
matches = _NOOP
for k in self.devices:
if matches(k):
return self.devices[k]
else:
raise UnknownDevice(name)
def get_switch(self, name):
"""
Get a switch by name.
"""
try:
return self._switches[name]
except KeyError:
raise UnknownDevice(name)
def get_motion(self, name):
"""
Get a motion by name.
"""
try:
return self._motions[name]
except KeyError:
raise UnknownDevice(name)
def get_bridge(self, name):
"""
Get a bridge by name.
"""
try:
return self._bridges[name]
except KeyError:
raise UnknownDevice(name)
def get_maker(self, name):
"""
Get a maker by name.
"""
try:
return self._makers[name]
except KeyError:
raise UnknownDevice(name)
|
iancmcc/ouimeaux | ouimeaux/device/__init__.py | Device.get_state | python | def get_state(self, force_update=False):
if force_update or self._state is None:
return int(self.basicevent.GetBinaryState()['BinaryState'])
return self._state | Returns 0 if off and 1 if on. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/device/__init__.py#L36-L42 | null | class Device(object):
def __init__(self, url):
self._state = None
base_url = url.rsplit('/', 1)[0]
self.host = urlsplit(url).hostname
#self.port = urlsplit(url).port
xml = requests_get(url)
self._config = deviceParser.parseString(xml.content).device
sl = self._config.serviceList
self.services = {}
for svc in sl.service:
svcname = svc.get_serviceType().split(':')[-2]
service = Service(svc, base_url)
service.eventSubURL = base_url + svc.get_eventSubURL()
self.services[svcname] = service
setattr(self, svcname, service)
def _update_state(self, value):
self._state = int(value)
def __getstate__(self):
odict = self.__dict__.copy() # copy the dict since we change it
if 'register_listener' in odict:
del odict['register_listener']
return odict
def get_service(self, name):
try:
return self.services[name]
except KeyError:
raise UnknownService(name)
def list_services(self):
return self.services.keys()
def ping(self):
try:
self.get_state()
except Exception:
raise DeviceUnreachable(self)
def explain(self):
for name, svc in self.services.items():
print(name)
print('-' * len(name))
for aname, action in svc.actions.items():
print(" %s(%s)" % (aname, ', '.join(action.args)))
print()
@property
def model(self):
return self._config.get_modelDescription()
@property
def name(self):
return self._config.get_friendlyName()
@property
def serialnumber(self):
return self._config.get_serialNumber()
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | receiver | python | def receiver(signal, **kwargs):
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator | A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
... | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L365-L385 | null | from __future__ import absolute_import
import sys
import threading
import weakref
import logging
from future.builtins import range
import six
from .inspect import func_accepts_kwargs
if six.PY2:
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
pysignals_debug = False
def set_debug( val ):
pysignals_debug = val
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
#from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if pysignals_debug:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be remove from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
logging.WARNING("Passing `weak` to disconnect has no effect.")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receive(self, **kwargs):
"""
A decorator for connecting receivers to this signal. Used by passing in the
keyword arguments to connect::
@post_save.receive(sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
self.connect(func, **kwargs)
return func
return _decorator
class StateChange( Signal ):
def __init__(self, providing_args=None):
super(StateChange, self).__init__(providing_args)
self.sender_status = {}
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers *only if* the signal's
contents has changed.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a raises an error.
Arguments:
sender
The sender of the signal Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers:
return responses
sender_id = _make_id(sender)
if sender_id not in self.sender_status:
self.sender_status[sender_id] = {}
if self.sender_status[sender_id] == named:
return responses
self.sender_status[sender_id] = named
for receiver in self._live_receivers(sender_id):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | Signal.send | python | def send(self, sender, **named):
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses | Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ]. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L178-L203 | null | class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
#from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if pysignals_debug:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be remove from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
logging.WARNING("Passing `weak` to disconnect has no effect.")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receive(self, **kwargs):
"""
A decorator for connecting receivers to this signal. Used by passing in the
keyword arguments to connect::
@post_save.receive(sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
self.connect(func, **kwargs)
return func
return _decorator
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | Signal.send_robust | python | def send_robust(self, sender, **named):
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses | Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L205-L244 | null | class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
#from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if pysignals_debug:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be remove from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
logging.WARNING("Passing `weak` to disconnect has no effect.")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receive(self, **kwargs):
"""
A decorator for connecting receivers to this signal. Used by passing in the
keyword arguments to connect::
@post_save.receive(sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
self.connect(func, **kwargs)
return func
return _decorator
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | Signal._live_receivers | python | def _live_receivers(self, sender):
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers | Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L257-L294 | [
"def _make_id(target):\n if hasattr(target, '__func__'):\n return (id(target.__self__), id(target.__func__))\n return id(target)\n",
"def _clear_dead_receivers(self):\n # Note: caller is assumed to hold self.lock.\n if self._dead_receivers:\n self._dead_receivers = False\n new_receivers = []\n for r in self.receivers:\n if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:\n continue\n new_receivers.append(r)\n self.receivers = new_receivers\n"
] | class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
#from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if pysignals_debug:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be remove from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
logging.WARNING("Passing `weak` to disconnect has no effect.")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receive(self, **kwargs):
"""
A decorator for connecting receivers to this signal. Used by passing in the
keyword arguments to connect::
@post_save.receive(sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
self.connect(func, **kwargs)
return func
return _decorator
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | Signal.receive | python | def receive(self, **kwargs):
def _decorator(func):
self.connect(func, **kwargs)
return func
return _decorator | A decorator for connecting receivers to this signal. Used by passing in the
keyword arguments to connect::
@post_save.receive(sender=MyModel)
def signal_receiver(sender, **kwargs):
... | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L305-L318 | null | class Signal(object):
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
of type Signal, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
#from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if pysignals_debug:
import inspect
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = WeakMethod
receiver_object = receiver.__self__
if six.PY3:
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
else:
receiver = ref(receiver, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, weak=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be remove from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if weak is not None:
logging.WARNING("Passing `weak` to disconnect has no effect.")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
for receiver in self._live_receivers(sender):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ]. May raise
DispatcherKeyError.
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver. The traceback is always attached to the error at
``__traceback__``.
"""
responses = []
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return responses
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
if not hasattr(err, '__traceback__'):
err.__traceback__ = sys.exc_info()[2]
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
|
iancmcc/ouimeaux | ouimeaux/pysignals/dispatcher.py | StateChange.send | python | def send(self, sender, **named):
responses = []
if not self.receivers:
return responses
sender_id = _make_id(sender)
if sender_id not in self.sender_status:
self.sender_status[sender_id] = {}
if self.sender_status[sender_id] == named:
return responses
self.sender_status[sender_id] = named
for receiver in self._live_receivers(sender_id):
response = receiver(signal=self, sender=sender, **named)
responses.append((receiver, response))
return responses | Send signal from sender to all connected receivers *only if* the signal's
contents has changed.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop, so it is quite possible to not have all
receivers called if a raises an error.
Arguments:
sender
The sender of the signal Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ]. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/pysignals/dispatcher.py#L327-L362 | [
"def _make_id(target):\n if hasattr(target, '__func__'):\n return (id(target.__self__), id(target.__func__))\n return id(target)\n",
"def _live_receivers(self, sender):\n \"\"\"\n Filter sequence of receivers to get resolved, live receivers.\n\n This checks for weak references and resolves them, then returning only\n live receivers.\n \"\"\"\n receivers = None\n if self.use_caching and not self._dead_receivers:\n receivers = self.sender_receivers_cache.get(sender)\n # We could end up here with NO_RECEIVERS even if we do check this case in\n # .send() prior to calling _live_receivers() due to concurrent .send() call.\n if receivers is NO_RECEIVERS:\n return []\n if receivers is None:\n with self.lock:\n self._clear_dead_receivers()\n senderkey = _make_id(sender)\n receivers = []\n for (receiverkey, r_senderkey), receiver in self.receivers:\n if r_senderkey == NONE_ID or r_senderkey == senderkey:\n receivers.append(receiver)\n if self.use_caching:\n if not receivers:\n self.sender_receivers_cache[sender] = NO_RECEIVERS\n else:\n # Note, we must cache the weakref versions.\n self.sender_receivers_cache[sender] = receivers\n non_weak_receivers = []\n for receiver in receivers:\n if isinstance(receiver, weakref.ReferenceType):\n # Dereference the weak reference.\n receiver = receiver()\n if receiver is not None:\n non_weak_receivers.append(receiver)\n else:\n non_weak_receivers.append(receiver)\n return non_weak_receivers\n"
] | class StateChange( Signal ):
def __init__(self, providing_args=None):
super(StateChange, self).__init__(providing_args)
self.sender_status = {}
|
iancmcc/ouimeaux | ouimeaux/device/switch.py | Switch.set_state | python | def set_state(self, state):
# Send the UPnP SetBinaryState action via the basicevent service (0 = off,
# 1 = on) and record the coerced int in self._state for later reads.
self.basicevent.SetBinaryState(BinaryState=int(state))
self._state = int(state) | Set the state of this device to on or off. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/device/switch.py#L8-L13 | null | class Switch(Device):
def off(self):
"""
Turn this device off. If already off, will return "Error".
"""
return self.set_state(0)
def on(self):
"""
Turn this device on. If already on, will return "Error".
"""
return self.set_state(1)
def toggle(self):
"""
Toggle the switch's state.
"""
return self.set_state(not self.get_state())
def blink(self, delay=1):
"""
Toggle the switch once, then again after a delay (in seconds).
"""
self.toggle()
gevent.spawn_later(delay, self.toggle)
def __repr__(self):
return '<WeMo Switch "{name}">'.format(name=self.name)
|
iancmcc/ouimeaux | ouimeaux/device/switch.py | Switch.blink | python | def blink(self, delay=1):
# Toggle immediately, then schedule a second toggle `delay` seconds later
# on the gevent hub so the switch returns to its prior state; the call
# itself does not block.
self.toggle()
gevent.spawn_later(delay, self.toggle) | Toggle the switch once, then again after a delay (in seconds). | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/device/switch.py#L33-L38 | [
"def toggle(self):\n \"\"\"\n Toggle the switch's state.\n \"\"\"\n return self.set_state(not self.get_state())\n"
] | class Switch(Device):
def set_state(self, state):
"""
Set the state of this device to on or off.
"""
self.basicevent.SetBinaryState(BinaryState=int(state))
self._state = int(state)
def off(self):
"""
Turn this device off. If already off, will return "Error".
"""
return self.set_state(0)
def on(self):
"""
Turn this device on. If already on, will return "Error".
"""
return self.set_state(1)
def toggle(self):
"""
Toggle the switch's state.
"""
return self.set_state(not self.get_state())
def __repr__(self):
return '<WeMo Switch "{name}">'.format(name=self.name)
|
iancmcc/ouimeaux | ouimeaux/subscribe.py | SubscriptionRegistry.server | python | def server(self):
# Lazily build the callback server on first access and memoize it on
# self._server so every later access reuses the same instance.
server = getattr(self, "_server", None)
if server is None:
# NOTE(review): the docstring says "UDP server", but WSGIServer serves
# HTTP over TCP on self.port; UPnP event callbacks arrive via _handle.
server = WSGIServer(('', self.port), self._handle, log=None)
self._server = server
return server | UDP server to listen for responses. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/subscribe.py#L95-L103 | null | class SubscriptionRegistry(object):
def __init__(self):
self._devices = {}
self._callbacks = defaultdict(list)
self.port = randint(8300, 8990)
def register(self, device):
if not device:
log.error("Received an invalid device: %r", device)
return
log.info("Subscribing to basic events from %r", device)
# Provide a function to register a callback when the device changes
# state
device.register_listener = partial(self.on, device, 'BinaryState')
self._devices[device.host] = device
self._resubscribe(device.basicevent.eventSubURL)
def _resubscribe(self, url, sid=None):
headers = {'TIMEOUT': 'Second-%d' % 1800}
if sid is not None:
headers['SID'] = sid
else:
host = get_ip_address()
headers.update({
"CALLBACK": '<http://%s:%d>'%(host, self.port),
"NT": "upnp:event"
})
response = requests_request(method="SUBSCRIBE", url=url,
headers=headers)
if response.status_code == 412 and sid:
# Invalid subscription ID. Send an UNSUBSCRIBE for safety and
# start over.
requests_request(method='UNSUBSCRIBE', url=url,
headers={'SID': sid})
return self._resubscribe(url)
timeout = int(response.headers.get('timeout', '1801').replace(
'Second-', ''))
sid = response.headers.get('sid', sid)
gevent.spawn_later(int(timeout * 0.75), self._resubscribe, url, sid)
def _handle(self, environ, start_response):
device = self._devices.get(environ['REMOTE_ADDR'])
if device is not None:
data = environ['wsgi.input'].read()
# trim garbage from end, if any
data = data.split("\n\n")[0]
doc = cElementTree.fromstring(data)
for propnode in doc.findall('./{0}property'.format(NS)):
for property_ in propnode.getchildren():
text = property_.text
if isinstance(device, Insight) and property_.tag=='BinaryState':
text = text.split('|')[0]
subscription.send(device, type=property_.tag, value=text)
self._event(device, property_.tag, text)
start_response('200 OK', [
('Content-Type', 'text/html'),
('Content-Length', str(len(SUCCESS))),
('Connection', 'close')
])
yield SUCCESS
def _event(self, device, type_, value):
for t, callback in self._callbacks.get(device, ()):
if t == type_:
callback(value)
def on(self, device, type, callback):
self._callbacks[device].append((type, callback))
@property
|
iancmcc/ouimeaux | ouimeaux/device/maker.py | Maker.get_state | python | def get_state(self, force_update=False):
# The base implementation using GetBinaryState doesn't work for Maker (always returns 0),
# so pull the switch state from the device attributes instead.
if force_update or self._state is None:
# NOTE(review): the freshly fetched value is returned but never stored
# in self._state, so the cache stays empty until set_state() is called.
return(int(self.maker_attribs.get('switchstate',0)))
return self._state | Returns 0 if off and 1 if on. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/device/maker.py#L11-L19 | null | class Maker(Device):
def __repr__(self):
return '<WeMo Maker "{name}">'.format(name=self.name)
def set_state(self, state):
"""
Set the state of this device to on or off.
"""
self.basicevent.SetBinaryState(BinaryState=int(state))
self._state = int(state)
def off(self):
"""
Turn this device off. If already off, will return "Error".
"""
return self.set_state(0)
def on(self):
"""
Turn this device on. If already on, will return "Error".
"""
return self.set_state(1)
@property
def maker_attribs(self):
    """Fetch and parse the Maker's attribute list from the device.

    Returns a dict with keys ``switchstate``, ``sensorstate``,
    ``switchmode`` and ``hassensor``, each coerced to int.
    """
    makerresp = self.deviceevent.GetAttributes().get('attributeList')
    makerresp = "<attributes>" + makerresp + "</attributes>"
    # The device returns the inner attribute XML entity-escaped; restore the
    # angle brackets so ElementTree can parse the <attribute> elements.
    # (The previous no-op replace(">", ">") / replace("<", "<") calls were a
    # mangled form of this unescaping.)
    makerresp = makerresp.replace("&gt;", ">")
    makerresp = makerresp.replace("&lt;", "<")
    attributes = et.fromstring(makerresp)
    for attribute in attributes:
        if attribute[0].text == "Switch":
            switchstate = attribute[1].text
        elif attribute[0].text == "Sensor":
            sensorstate = attribute[1].text
        elif attribute[0].text == "SwitchMode":
            switchmode = attribute[1].text
        elif attribute[0].text == "SensorPresent":
            hassensor = attribute[1].text
    return { 'switchstate' : int(switchstate),
             'sensorstate' : int(sensorstate),
             'switchmode' : int(switchmode),
             'hassensor' : int(hassensor)}
@property
def switch_state(self):
return self.maker_attribs['switchstate']
@property
def sensor_state(self):
return self.maker_attribs['sensorstate']
@property
def switch_mode(self):
return self.maker_attribs['switchmode']
@property
def has_sensor(self):
return self.maker_attribs['hassensor']
|
iancmcc/ouimeaux | ouimeaux/discovery.py | UPnP.server | python | def server(self):
# Lazily create a gevent DatagramServer (UDP) bound to self.bind and cache
# it on self._server; M-SEARCH replies are handled by _response_received.
server = getattr(self, "_server", None)
if server is None:
log.debug("Binding datagram server to %s", self.bind)
server = DatagramServer(self.bind, self._response_received)
self._server = server
return server | UDP server to listen for responses. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/discovery.py#L59-L68 | null | class UPnP(object):
"""
Makes M-SEARCH requests, filters out non-WeMo responses, and dispatches
signals with the results.
"""
def __init__(self, mcast_ip='239.255.255.250', mcast_port=1900, bind=None):
if bind is None:
host = get_ip_address()
if host.startswith('127.'):
raise UPnPLoopbackException("Using %s as a callback IP for "
"discovery will not be successful.")
port = 54321
bind = '{0}:{1}'.format(host, port)
self.bind = bind
self.mcast_ip = mcast_ip
self.mcast_port = mcast_port
self.clients = {}
def _response_received(self, message, address):
log.debug("Received a response from {0}:{1}".format(*address))
lines = [x.decode() for x in message.splitlines()]
lines.pop(0) # HTTP status
headers = {}
for line in lines:
try:
header, value = line.split(":", 1)
headers[header.lower()] = value.strip()
except ValueError:
continue
if (headers.get('x-user-agent', None) == 'redsonic'):
location=headers.get('location',None)
if location is not None and location not in self.clients:
log.debug("Found WeMo at {0}".format(location))
self.clients[location] = headers
gevent.spawn(discovered.send, self, address=address,
headers=headers)
@property
def broadcast(self):
"""
Send a multicast M-SEARCH request asking for devices to report in.
"""
log.debug("Broadcasting M-SEARCH to %s:%s", self.mcast_ip, self.mcast_port)
request = '\r\n'.join(("M-SEARCH * HTTP/1.1",
"HOST:{mcast_ip}:{mcast_port}",
"ST:upnp:rootdevice",
"MX:2",
'MAN:"ssdp:discover"',
"", "")).format(**self.__dict__)
self.server.sendto(request.encode(), (self.mcast_ip, self.mcast_port))
|
iancmcc/ouimeaux | ouimeaux/discovery.py | UPnP.broadcast | python | def broadcast(self):
log.debug("Broadcasting M-SEARCH to %s:%s", self.mcast_ip, self.mcast_port)
# Assemble the SSDP M-SEARCH request; the two trailing empty strings
# produce the CRLF CRLF that terminates the HTTPU header block.
request = '\r\n'.join(("M-SEARCH * HTTP/1.1",
"HOST:{mcast_ip}:{mcast_port}",
"ST:upnp:rootdevice",
"MX:2",
'MAN:"ssdp:discover"',
"", "")).format(**self.__dict__)
# Send via the lazily-created UDP server socket to the multicast group.
self.server.sendto(request.encode(), (self.mcast_ip, self.mcast_port)) | Send a multicast M-SEARCH request asking for devices to report in. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/discovery.py#L70-L81 | null | class UPnP(object):
"""
Makes M-SEARCH requests, filters out non-WeMo responses, and dispatches
signals with the results.
"""
def __init__(self, mcast_ip='239.255.255.250', mcast_port=1900, bind=None):
if bind is None:
host = get_ip_address()
if host.startswith('127.'):
raise UPnPLoopbackException("Using %s as a callback IP for "
"discovery will not be successful.")
port = 54321
bind = '{0}:{1}'.format(host, port)
self.bind = bind
self.mcast_ip = mcast_ip
self.mcast_port = mcast_port
self.clients = {}
def _response_received(self, message, address):
log.debug("Received a response from {0}:{1}".format(*address))
lines = [x.decode() for x in message.splitlines()]
lines.pop(0) # HTTP status
headers = {}
for line in lines:
try:
header, value = line.split(":", 1)
headers[header.lower()] = value.strip()
except ValueError:
continue
if (headers.get('x-user-agent', None) == 'redsonic'):
location=headers.get('location',None)
if location is not None and location not in self.clients:
log.debug("Found WeMo at {0}".format(location))
self.clients[location] = headers
gevent.spawn(discovered.send, self, address=address,
headers=headers)
@property
def server(self):
"""
UDP server to listen for responses.
"""
server = getattr(self, "_server", None)
if server is None:
log.debug("Binding datagram server to %s", self.bind)
server = DatagramServer(self.bind, self._response_received)
self._server = server
return server
|
iancmcc/ouimeaux | ouimeaux/utils.py | retry_with_delay | python | def retry_with_delay(f, delay=60):
@wraps(f)
def inner(*args, **kwargs):
# NOTE(review): unconditionally forces a 5-second request timeout,
# clobbering any timeout the caller may have passed in kwargs.
kwargs['timeout'] = 5
# One initial attempt plus get_retries() retries.
remaining = get_retries() + 1
while remaining:
remaining -= 1
try:
return f(*args, **kwargs)
except (requests.ConnectionError, requests.Timeout):
if not remaining:
# Out of attempts: propagate the last network error.
raise
gevent.sleep(delay)
return inner | Retry the wrapped requests.request function in case of ConnectionError.
Optionally limit the number of retries or set the delay between retries. | train | https://github.com/iancmcc/ouimeaux/blob/89f3d05e7ae0a356690f898a4e1801ea3c104200/ouimeaux/utils.py#L68-L85 | null | from functools import wraps
import re
import socket
import struct
import time
import gevent
import requests
def tz_hours():
    """Return the local offset from UTC in whole hours, formatted like "-05.00"."""
    offset = time.localtime().tm_hour - time.gmtime().tm_hour
    if offset < 0:
        sign = '-'
    else:
        sign = ''
    return "%s%02d.00" % (sign, abs(offset))
def is_dst():
    """Return 1 when daylight-saving time is currently in effect, else 0."""
    # tm_isdst may be -1 ("unknown"); any non-zero value maps to 1, as in
    # the truthiness test this replaces.
    return int(bool(time.localtime().tm_isdst))
def get_timesync():
# Build the Belkin TimeSync SOAP envelope carrying the current epoch time,
# the UTC offset (see tz_hours) and the DST flags; used to set a WeMo
# device's clock.
timesync = """
<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:TimeSync xmlns:u="urn:Belkin:service:timesync:1">
<UTC>{utc}</UTC>
<TimeZone>{tz}</TimeZone>
<dst>{dst}</dst>
<DstSupported>{dstsupported}</DstSupported>
</u:TimeSync>
</s:Body>
</s:Envelope>""".format(
utc=int(time.time()),
tz=tz_hours(),
dst=is_dst(),
dstsupported=is_dst()).strip()
return timesync
def get_ip_address():
    """Best-effort lookup of this host's outbound IPv4 address.

    "Connects" a UDP socket to an arbitrary external address (no packet is
    actually sent for a UDP connect) and reads back the local address the
    OS picked. Returns None when no route is available.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        probe.connect(('1.2.3.4', 9))
        return probe.getsockname()[0]
    except socket.error:
        return None
    finally:
        del probe
def matcher(match_string):
    """Build a case-insensitive subsequence matcher for *match_string*.

    The returned predicate reports whether the characters of
    *match_string* appear in order (not necessarily adjacent) within its
    argument.
    """
    escaped = (re.escape(ch) for ch in match_string.lower())
    pattern = re.compile('.*?'.join(escaped))

    def matches(candidate):
        return pattern.search(candidate.lower()) is not None

    return matches
# This is pretty arbitrary. I'm choosing, for no real reason, the length of
# a subscription.
_RETRIES = 1801/60
def get_retries():
# Number of retry attempts shared by the retry_with_delay wrappers below.
return _RETRIES
requests_get = retry_with_delay(requests.get)
requests_post = retry_with_delay(requests.post)
requests_request = retry_with_delay(requests.request)
|
r4fek/django-cassandra-engine | django_cassandra_engine/base/introspection.py | CassandraDatabaseIntrospection._discover_models | python | def _discover_models(self):
# Populate the self._cql_models cache: for every installed app, collect the
# cqlengine models belonging to this connection's alias and keyspace.
apps = get_installed_apps()
connection = self.connection.connection.alias
keyspace = self.connection.connection.keyspace
for app in apps:
self._cql_models[app.__name__] = get_cql_models(
app, connection=connection, keyspace=keyspace) | Return a dict containing a list of cassandra.cqlengine.Model classes
within installed App. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/introspection.py#L20-L32 | null | class CassandraDatabaseIntrospection(BaseDatabaseIntrospection):
def __init__(self, *args, **kwargs):
super(CassandraDatabaseIntrospection, self).__init__(*args, **kwargs)
self._cql_models = {}
self._models_discovered = False
@property
def cql_models(self):
if not self._models_discovered:
self._discover_models()
self._models_discovered = True
return self._cql_models
def django_table_names(self, only_existing=False, **kwargs):
"""
Returns a list of all table names that have associated cqlengine models
and are present in settings.INSTALLED_APPS.
"""
all_models = list(chain.from_iterable(self.cql_models.values()))
tables = [model.column_family_name(include_keyspace=False)
for model in all_models]
return tables
def table_names(self, cursor=None, **kwargs):
"""
Returns all table names in current keyspace
"""
# Avoid migration code being executed
if cursor:
return []
connection = self.connection.connection
keyspace_name = connection.keyspace
if not connection.cluster.schema_metadata_enabled and \
keyspace_name not in connection.cluster.metadata.keyspaces:
connection.cluster.refresh_schema_metadata()
keyspace = connection.cluster.metadata.keyspaces[keyspace_name]
return keyspace.tables
def get_table_list(self, cursor):
return self.table_names(cursor)
def sequence_list(self):
"""
Sequences are not supported
"""
return []
def get_relations(self, *_):
"""No relations in nonrel database"""
return []
def get_table_description(self, *_):
"""
Unfortunately we can't use `DESCRIBE table_name` here
because DESCRIBE isn't part of CQL language..
"""
return ""
def get_constraints(self, cursor, table_name):
return {}
def get_indexes(self, cursor, table_name):
return {}
|
r4fek/django-cassandra-engine | django_cassandra_engine/base/introspection.py | CassandraDatabaseIntrospection.django_table_names | python | def django_table_names(self, only_existing=False, **kwargs):
# Flatten the per-app model lists and map each model to its bare column
# family name (keyspace prefix stripped). `only_existing` is accepted for
# interface compatibility with Django but is not used here.
all_models = list(chain.from_iterable(self.cql_models.values()))
tables = [model.column_family_name(include_keyspace=False)
for model in all_models]
return tables | Returns a list of all table names that have associated cqlengine models
and are present in settings.INSTALLED_APPS. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/introspection.py#L41-L51 | null | class CassandraDatabaseIntrospection(BaseDatabaseIntrospection):
def __init__(self, *args, **kwargs):
super(CassandraDatabaseIntrospection, self).__init__(*args, **kwargs)
self._cql_models = {}
self._models_discovered = False
def _discover_models(self):
"""
Return a dict containing a list of cassandra.cqlengine.Model classes
within installed App.
"""
apps = get_installed_apps()
connection = self.connection.connection.alias
keyspace = self.connection.connection.keyspace
for app in apps:
self._cql_models[app.__name__] = get_cql_models(
app, connection=connection, keyspace=keyspace)
@property
def cql_models(self):
if not self._models_discovered:
self._discover_models()
self._models_discovered = True
return self._cql_models
def table_names(self, cursor=None, **kwargs):
"""
Returns all table names in current keyspace
"""
# Avoid migration code being executed
if cursor:
return []
connection = self.connection.connection
keyspace_name = connection.keyspace
if not connection.cluster.schema_metadata_enabled and \
keyspace_name not in connection.cluster.metadata.keyspaces:
connection.cluster.refresh_schema_metadata()
keyspace = connection.cluster.metadata.keyspaces[keyspace_name]
return keyspace.tables
def get_table_list(self, cursor):
return self.table_names(cursor)
def sequence_list(self):
"""
Sequences are not supported
"""
return []
def get_relations(self, *_):
"""No relations in nonrel database"""
return []
def get_table_description(self, *_):
"""
Unfortunately we can't use `DESCRIBE table_name` here
because DESCRIBE isn't part of CQL language..
"""
return ""
def get_constraints(self, cursor, table_name):
return {}
def get_indexes(self, cursor, table_name):
return {}
|
r4fek/django-cassandra-engine | django_cassandra_engine/base/introspection.py | CassandraDatabaseIntrospection.table_names | python | def table_names(self, cursor=None, **kwargs):
# A real cursor means Django's migration machinery is calling; report no
# tables so migration code is not executed against Cassandra.
if cursor:
return []
connection = self.connection.connection
keyspace_name = connection.keyspace
# With schema metadata disabled the keyspace may be missing from the
# driver's metadata cache; force one refresh before reading it.
if not connection.cluster.schema_metadata_enabled and \
keyspace_name not in connection.cluster.metadata.keyspaces:
connection.cluster.refresh_schema_metadata()
keyspace = connection.cluster.metadata.keyspaces[keyspace_name]
return keyspace.tables | Returns all table names in current keyspace | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/introspection.py#L53-L68 | null | class CassandraDatabaseIntrospection(BaseDatabaseIntrospection):
def __init__(self, *args, **kwargs):
super(CassandraDatabaseIntrospection, self).__init__(*args, **kwargs)
self._cql_models = {}
self._models_discovered = False
def _discover_models(self):
"""
Return a dict containing a list of cassandra.cqlengine.Model classes
within installed App.
"""
apps = get_installed_apps()
connection = self.connection.connection.alias
keyspace = self.connection.connection.keyspace
for app in apps:
self._cql_models[app.__name__] = get_cql_models(
app, connection=connection, keyspace=keyspace)
@property
def cql_models(self):
if not self._models_discovered:
self._discover_models()
self._models_discovered = True
return self._cql_models
def django_table_names(self, only_existing=False, **kwargs):
"""
Returns a list of all table names that have associated cqlengine models
and are present in settings.INSTALLED_APPS.
"""
all_models = list(chain.from_iterable(self.cql_models.values()))
tables = [model.column_family_name(include_keyspace=False)
for model in all_models]
return tables
def get_table_list(self, cursor):
return self.table_names(cursor)
def sequence_list(self):
"""
Sequences are not supported
"""
return []
def get_relations(self, *_):
"""No relations in nonrel database"""
return []
def get_table_description(self, *_):
"""
Unfortunately we can't use `DESCRIBE table_name` here
because DESCRIBE isn't part of CQL language..
"""
return ""
def get_constraints(self, cursor, table_name):
return {}
def get_indexes(self, cursor, table_name):
return {}
|
r4fek/django-cassandra-engine | django_cassandra_engine/base/creation.py | CassandraDatabaseCreation.set_models_keyspace | python | def set_models_keyspace(self, keyspace):
# Point every discovered cqlengine model at `keyspace` by mutating the
# class-level __keyspace__ attribute (used e.g. to redirect all models at
# the test keyspace during create_test_db).
for models in self.connection.introspection.cql_models.values():
for model in models:
model.__keyspace__ = keyspace | Set keyspace for all connection models | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/creation.py#L90-L95 | null | class CassandraDatabaseCreation(BaseDatabaseCreation):
def create_test_db(self, verbosity=1, autoclobber=False, **kwargs):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
from django.conf import settings
self.connection.connect()
default_alias = get_default_cassandra_connection()[0]
# If using django-nose, its runner has already set the db name
# to test_*, so restore it here so that all the models for the
# live keyspace can be found.
self.connection.connection.keyspace = \
self.connection.settings_dict['NAME']
test_database_name = self._get_test_db_name()
# Set all models keyspace to the test keyspace
self.set_models_keyspace(test_database_name)
if verbosity >= 1:
test_db_repr = ''
if verbosity >= 2:
test_db_repr = " ('%s')" % test_database_name
print("Creating test database for alias '%s'%s..." % (
self.connection.alias, test_db_repr))
options = self.connection.settings_dict.get('OPTIONS', {})
# temporarily enable schema metadata for sync_cassandra
connection_options_copy = options.get('connection', {}).copy()
if not connection_options_copy.get('schema_metadata_enabled', True):
options['connection']['schema_metadata_enabled'] = True
self.connection.reconnect()
set_default_connection(default_alias)
replication_opts = options.get('replication', {})
replication_factor = replication_opts.pop('replication_factor', 1)
create_keyspace_simple(
test_database_name,
replication_factor,
connections=[self.connection.alias])
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
self.connection.reconnect()
set_default_connection(default_alias)
# Report syncdb messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command(
'sync_cassandra',
verbosity=max(verbosity - 1, 0),
database=self.connection.alias
)
# restore the original connection options
if not connection_options_copy.get('schema_metadata_enabled', True):
print('Disabling metadata on %s' % self.connection.settings_dict['NAME'])
options['connection']['schema_metadata_enabled'] = \
connection_options_copy['schema_metadata_enabled']
self.connection.reconnect()
set_default_connection(default_alias)
return test_database_name
def _destroy_test_db(self, test_database_name, verbosity=1, **kwargs):
drop_keyspace(test_database_name, connections=[self.connection.alias])
|
r4fek/django-cassandra-engine | django_cassandra_engine/sessions/backends/db.py | SessionStore.create_model_instance | python | def create_model_instance(self, data):
# Build (but do not save) a Session model row: key from the current
# session (created on demand), payload encoded, expiry computed now.
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
) | Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
:param data: | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/sessions/backends/db.py#L31-L42 | null | class SessionStore(DjangoSessionStore):
@classmethod
def get_model_class(cls):
"""
Avoid circular import
"""
from django_cassandra_engine.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(session_key=self.session_key)
if s.expire_date <= datetime.now():
s.delete()
raise SuspiciousOperation('old session detected')
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
return {}
def exists(self, session_key):
try:
self.model.objects.get(session_key=session_key)
return True
except self.model.DoesNotExist:
return False
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
:param must_create:
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
obj.save()
def delete(self, session_key=None):
if session_key is None:
if not self.session_key:
return
session_key = self.session_key
self.model.objects.filter(session_key=session_key).delete()
@classmethod
def clear_expired(cls):
"""
# TODO: implement this
"""
|
r4fek/django-cassandra-engine | django_cassandra_engine/sessions/backends/db.py | SessionStore.save | python | def save(self, must_create=False):
# No key yet -> delegate to create(), which assigns a key and saves.
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
# NOTE(review): must_create only controls no_load above; it is not
# enforced on write, so an existing row with the same key is overwritten.
obj.save() | Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
:param must_create: | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/sessions/backends/db.py#L66-L79 | null | class SessionStore(DjangoSessionStore):
@classmethod
def get_model_class(cls):
"""
Avoid circular import
"""
from django_cassandra_engine.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
:param data:
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def load(self):
try:
s = self.model.objects.get(session_key=self.session_key)
if s.expire_date <= datetime.now():
s.delete()
raise SuspiciousOperation('old session detected')
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
return {}
def exists(self, session_key):
try:
self.model.objects.get(session_key=session_key)
return True
except self.model.DoesNotExist:
return False
def delete(self, session_key=None):
if session_key is None:
if not self.session_key:
return
session_key = self.session_key
self.model.objects.filter(session_key=session_key).delete()
@classmethod
def clear_expired(cls):
"""
# TODO: implement this
"""
|
r4fek/django-cassandra-engine | django_cassandra_engine/rest/serializers.py | DjangoCassandraModelSerializer.get_field_kwargs | python | def get_field_kwargs(self, field_name, model_field):
# Translate a model field into DRF serializer-field kwargs (label,
# help_text, length/value bounds, validators, ...), mirroring DRF's
# rest_framework field mapping but adapted for cqlengine columns; explicit
# kwargs are extracted out of the validator list so they are not applied
# twice.
kwargs = {}
validator_kwarg = list(model_field.validators)
# The following will only be used by ModelField classes.
# Gets removed for everything else.
kwargs['model_field'] = model_field
if model_field.verbose_name and needs_label(model_field, field_name):
kwargs['label'] = capfirst(model_field.verbose_name)
if model_field.help_text:
kwargs['help_text'] = model_field.help_text
max_digits = getattr(model_field, 'max_digits', None)
if max_digits is not None:
kwargs['max_digits'] = max_digits
decimal_places = getattr(model_field, 'decimal_places', None)
if decimal_places is not None:
kwargs['decimal_places'] = decimal_places
if isinstance(model_field, models.TextField):
kwargs['style'] = {'base_template': 'textarea.html'}
if isinstance(model_field, models.AutoField) \
or not model_field.editable:
# If this field is read-only, then return early.
# Further keyword arguments are not valid.
kwargs['read_only'] = True
return kwargs
if model_field.has_default or model_field.blank or model_field.null:
kwargs['required'] = False
if model_field.null and not isinstance(model_field,
models.NullBooleanField):
kwargs['allow_null'] = True
if model_field.blank and (
isinstance(model_field, models.CharField) or
isinstance(model_field, models.TextField) or
isinstance(model_field, columns.Text)
):
kwargs['allow_blank'] = True
if isinstance(model_field, models.FilePathField):
kwargs['path'] = model_field.path
if model_field.match is not None:
kwargs['match'] = model_field.match
if model_field.recursive is not False:
kwargs['recursive'] = model_field.recursive
if model_field.allow_files is not True:
kwargs['allow_files'] = model_field.allow_files
if model_field.allow_folders is not False:
kwargs['allow_folders'] = model_field.allow_folders
if model_field.choices:
# If this model field contains choices, then return early.
# Further keyword arguments are not valid.
kwargs['choices'] = model_field.choices
return kwargs
# Our decimal validation is handled in the field code,
# not validator code.
# (In Django 1.9+ this differs from previous style)
if isinstance(model_field, models.DecimalField) and DecimalValidator:
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, DecimalValidator)
]
# Ensure that max_length is passed explicitly as a keyword arg,
# rather than as a validator.
max_length = getattr(model_field, 'max_length', None)
if max_length is not None and (
isinstance(model_field, models.CharField) or
isinstance(model_field, models.TextField)):
kwargs['max_length'] = max_length
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, validators.MaxLengthValidator)
]
# Ensure that min_length is passed explicitly as a keyword arg,
# rather than as a validator.
min_length = next((
validator.limit_value for validator in validator_kwarg
if isinstance(validator, validators.MinLengthValidator)
), None)
if min_length is not None and isinstance(model_field,
models.CharField):
kwargs['min_length'] = min_length
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, validators.MinLengthValidator)
]
# Ensure that max_value is passed explicitly as a keyword arg,
# rather than as a validator.
max_value = next((
validator.limit_value for validator in validator_kwarg
if isinstance(validator, validators.MaxValueValidator)
), None)
if max_value is not None and isinstance(model_field,
NUMERIC_FIELD_TYPES):
kwargs['max_value'] = max_value
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, validators.MaxValueValidator)
]
# Ensure that min_value is passed explicitly as a keyword arg,
# rather than as a validator.
min_value = next((
validator.limit_value for validator in validator_kwarg
if isinstance(validator, validators.MinValueValidator)
), None)
if min_value is not None and isinstance(model_field,
NUMERIC_FIELD_TYPES):
kwargs['min_value'] = min_value
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, validators.MinValueValidator)
]
# URLField does not need to include the URLValidator argument,
# as it is explicitly added in.
if isinstance(model_field, models.URLField):
validator_kwarg = [
validator for validator in validator_kwarg
if not isinstance(validator, validators.URLValidator)
]
# EmailField does not need to include the validate_email argument,
# as it is explicitly added in.
if isinstance(model_field, models.EmailField):
validator_kwarg = [
validator for validator in validator_kwarg
if validator is not validators.validate_email
]
# SlugField does not need to include the 'validate_slug' argument.
if isinstance(model_field, models.SlugField):
validator_kwarg = [
validator for validator in validator_kwarg
if validator is not validators.validate_slug
]
# IPAddressField does not need to include the 'validate_ipv46_address'
# argument.
if isinstance(model_field, models.GenericIPAddressField):
validator_kwarg = [
validator for validator in validator_kwarg
if validator is not validators.validate_ipv46_address
]
# Uniqueness cannot be validated cheaply against Cassandra, so it is
# flagged instead of enforced.
if getattr(model_field, 'unique', False):
warnings.warn(
'UniqueValidator is currently not supported '
'in DjangoCassandraSerializer'
)
if validator_kwarg:
kwargs['validators'] = validator_kwarg
return kwargs | Creates a default instance of a basic non-relational field. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/rest/serializers.py#L42-L215 | null | class DjangoCassandraModelSerializer(serializers.ModelSerializer):
serializer_field_mapping = {
columns.Text: serializers.CharField,
columns.UUID: serializers.CharField,
columns.Integer: serializers.IntegerField,
columns.TinyInt: serializers.IntegerField,
columns.SmallInt: serializers.IntegerField,
columns.BigInt: serializers.IntegerField,
columns.VarInt: serializers.IntegerField,
columns.Counter: serializers.IntegerField,
columns.Date: serializers.DateTimeField,
columns.Float: serializers.FloatField,
columns.Double: serializers.FloatField,
columns.Decimal: serializers.DecimalField,
columns.Boolean: serializers.BooleanField,
columns.DateTime: serializers.DateTimeField,
columns.List: serializers.ListField,
}
def build_standard_field(self, field_name, model_field):
"""
Create regular model fields.
"""
field_mapping = ClassLookupDict(self.serializer_field_mapping)
field_class = field_mapping[model_field]
field_kwargs = self.get_field_kwargs(field_name, model_field)
if 'choices' in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1,
# choices=DECIMAL_CHOICES)
valid_kwargs = {
'read_only', 'write_only',
'required', 'default', 'initial', 'source',
'label', 'help_text', 'style',
'error_messages', 'validators', 'allow_null', 'allow_blank',
'choices'
}
for key in list(field_kwargs.keys()):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, ModelField):
# `model_field` is only valid for the fallback case of
# `ModelField`, which is used when no other typed field
# matched to the model field.
field_kwargs.pop('model_field', None)
if not issubclass(field_class, CharField) and not \
issubclass(field_class, ChoiceField):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop('allow_blank', None)
if postgres_fields and isinstance(model_field,
postgres_fields.ArrayField):
# Populate the `child` argument on `ListField` instances generated
# for the PostgrSQL specfic `ArrayField`.
child_model_field = model_field.base_field
child_field_class, child_field_kwargs = self.build_standard_field(
'child', child_model_field
)
field_kwargs['child'] = child_field_class(**child_field_kwargs)
return field_class, field_kwargs
|
r4fek/django-cassandra-engine | django_cassandra_engine/management/commands/sync_cassandra.py | Command._import_management | python | def _import_management():
from importlib import import_module
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except SystemError:
# We get SystemError if INSTALLED_APPS contains the
# name of a class rather than a module
pass
except ImportError as exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') \
or 'management' not in msg:
raise | Import the 'management' module within each installed app, to register
dispatcher events. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/management/commands/sync_cassandra.py#L23-L51 | null | class Command(BaseCommand):
help = 'Sync Cassandra database(s)'
def add_arguments(self, parser):
parser.add_argument(
'--database',
action='store',
dest='database',
default=None,
help='Nominates a database to synchronize.',
)
@staticmethod
def sync(self, alias):
engine = get_engine_from_db_alias(alias)
if engine != 'django_cassandra_engine':
raise CommandError('Database {} is not cassandra!'.format(alias))
connection = connections[alias]
connection.connect()
options = connection.settings_dict.get('OPTIONS', {})
keyspace = connection.settings_dict['NAME']
replication_opts = options.get('replication', {})
strategy_class = replication_opts.pop('strategy_class',
'SimpleStrategy')
replication_factor = replication_opts.pop('replication_factor', 1)
self.stdout.write('Creating keyspace {} [CONNECTION {}] ..'.format(
keyspace, alias))
if strategy_class == 'SimpleStrategy':
management.create_keyspace_simple(
keyspace,
replication_factor,
connections=[alias])
else:
management.create_keyspace_network_topology(
keyspace,
replication_opts,
connections=[alias])
connection.connection.cluster.refresh_schema_metadata()
connection.connection.cluster.schema_metadata_enabled = True
for app_name, app_models \
in connection.introspection.cql_models.items():
for model in app_models:
self.stdout.write('Syncing %s.%s' % (app_name, model.__name__))
# patch this object used for type check in management.sync_table()
management.Model = (Model, DjangoCassandraModel)
management.sync_table(model, keyspaces=[keyspace],
connections=[alias])
def handle(self, **options):
self._import_management()
database = options.get('database')
if database is not None:
return self.sync(database)
cassandra_alias = None
for alias in connections:
engine = get_engine_from_db_alias(alias)
if engine == 'django_cassandra_engine':
self.sync(alias)
cassandra_alias = alias
if cassandra_alias is None:
raise CommandError(
'Please add django_cassandra_engine backend to DATABASES!')
|
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | get_installed_apps | python | def get_installed_apps():
if django.VERSION >= (1, 7):
from django.apps import apps
return [a.models_module for a in apps.get_app_configs()
if a.models_module is not None]
else:
from django.db import models
return models.get_apps() | Return list of all installed apps | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/utils.py#L57-L67 | null | import inspect
import django
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_cql_models(app, connection=None, keyspace=None):
"""
:param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace.
"""
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = connection == DEFAULT_DB_ALIAS or \
single_cassandra_connection
for name, obj in inspect.getmembers(app):
cql_model_types = (
cqlengine.models.Model,
DjangoCassandraModel
)
if (
inspect.isclass(obj) and issubclass(obj, cql_model_types) and
not obj.__abstract__
):
if obj.__connection__ == connection or \
(obj.__connection__ is None and is_default_connection) or \
obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
models.append(obj)
return models
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
def get_default_cassandra_connection():
"""
Return first default cassandra connection
:return:
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
|
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | get_cql_models | python | def get_cql_models(app, connection=None, keyspace=None):
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = connection == DEFAULT_DB_ALIAS or \
single_cassandra_connection
for name, obj in inspect.getmembers(app):
cql_model_types = (
cqlengine.models.Model,
DjangoCassandraModel
)
if (
inspect.isclass(obj) and issubclass(obj, cql_model_types) and
not obj.__abstract__
):
if obj.__connection__ == connection or \
(obj.__connection__ is None and is_default_connection) or \
obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
models.append(obj)
return models | :param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/utils.py#L70-L98 | [
"def get_cassandra_connections():\n \"\"\"\n :return: List of tuples (db_alias, connection) for all cassandra\n connections in DATABASES dict.\n \"\"\"\n\n from django.db import connections\n for alias in connections:\n engine = connections[alias].settings_dict.get('ENGINE', '')\n if engine == 'django_cassandra_engine':\n yield alias, connections[alias]\n"
] | import inspect
import django
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
if django.VERSION >= (1, 7):
from django.apps import apps
return [a.models_module for a in apps.get_app_configs()
if a.models_module is not None]
else:
from django.db import models
return models.get_apps()
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
def get_default_cassandra_connection():
"""
Return first default cassandra connection
:return:
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
|
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | get_cassandra_connections | python | def get_cassandra_connections():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias] | :return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/utils.py#L101-L111 | null | import inspect
import django
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
if django.VERSION >= (1, 7):
from django.apps import apps
return [a.models_module for a in apps.get_app_configs()
if a.models_module is not None]
else:
from django.db import models
return models.get_apps()
def get_cql_models(app, connection=None, keyspace=None):
"""
:param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace.
"""
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = connection == DEFAULT_DB_ALIAS or \
single_cassandra_connection
for name, obj in inspect.getmembers(app):
cql_model_types = (
cqlengine.models.Model,
DjangoCassandraModel
)
if (
inspect.isclass(obj) and issubclass(obj, cql_model_types) and
not obj.__abstract__
):
if obj.__connection__ == connection or \
(obj.__connection__ is None and is_default_connection) or \
obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
models.append(obj)
return models
def get_default_cassandra_connection():
"""
Return first default cassandra connection
:return:
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
|
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | get_default_cassandra_connection | python | def get_default_cassandra_connection():
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0] | Return first default cassandra connection
:return: | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/utils.py#L114-L123 | [
"def get_cassandra_connections():\n \"\"\"\n :return: List of tuples (db_alias, connection) for all cassandra\n connections in DATABASES dict.\n \"\"\"\n\n from django.db import connections\n for alias in connections:\n engine = connections[alias].settings_dict.get('ENGINE', '')\n if engine == 'django_cassandra_engine':\n yield alias, connections[alias]\n"
] | import inspect
import django
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
if django.VERSION >= (1, 7):
from django.apps import apps
return [a.models_module for a in apps.get_app_configs()
if a.models_module is not None]
else:
from django.db import models
return models.get_apps()
def get_cql_models(app, connection=None, keyspace=None):
"""
:param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace.
"""
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = connection == DEFAULT_DB_ALIAS or \
single_cassandra_connection
for name, obj in inspect.getmembers(app):
cql_model_types = (
cqlengine.models.Model,
DjangoCassandraModel
)
if (
inspect.isclass(obj) and issubclass(obj, cql_model_types) and
not obj.__abstract__
):
if obj.__connection__ == connection or \
(obj.__connection__ is None and is_default_connection) or \
obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
models.append(obj)
return models
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
|
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | get_cassandra_connection | python | def get_cassandra_connection(alias=None, name=None):
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection | :return: cassandra connection matching alias or name or just first found. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/utils.py#L126-L139 | [
"def get_cassandra_connections():\n \"\"\"\n :return: List of tuples (db_alias, connection) for all cassandra\n connections in DATABASES dict.\n \"\"\"\n\n from django.db import connections\n for alias in connections:\n engine = connections[alias].settings_dict.get('ENGINE', '')\n if engine == 'django_cassandra_engine':\n yield alias, connections[alias]\n"
] | import inspect
import django
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall',
'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
if django.VERSION >= (1, 7):
from django.apps import apps
return [a.models_module for a in apps.get_app_configs()
if a.models_module is not None]
else:
from django.db import models
return models.get_apps()
def get_cql_models(app, connection=None, keyspace=None):
"""
:param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace.
"""
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = connection == DEFAULT_DB_ALIAS or \
single_cassandra_connection
for name, obj in inspect.getmembers(app):
cql_model_types = (
cqlengine.models.Model,
DjangoCassandraModel
)
if (
inspect.isclass(obj) and issubclass(obj, cql_model_types) and
not obj.__abstract__
):
if obj.__connection__ == connection or \
(obj.__connection__ is None and is_default_connection) or \
obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace:
models.append(obj)
return models
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
def get_default_cassandra_connection():
"""
Return first default cassandra connection
:return:
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
def get_cassandra_db_alias():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
return alias
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
|
r4fek/django-cassandra-engine | django_cassandra_engine/management/commands/makemigrations.py | Command.handle | python | def handle(self, *args, **options):
self._change_cassandra_engine_name('django.db.backends.dummy')
try:
super(Command, self).handle(*args, **options)
finally:
self._change_cassandra_engine_name('django_cassandra_engine') | Pretend django_cassandra_engine to be dummy database backend
with no support for migrations. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/management/commands/makemigrations.py#L15-L24 | null | class Command(MakeMigrationsCommand):
@staticmethod
def _change_cassandra_engine_name(name):
for alias, _ in get_cassandra_connections():
connections[alias].settings_dict['ENGINE'] = name
|
r4fek/django-cassandra-engine | django_cassandra_engine/base/operations.py | CassandraDatabaseOperations.sql_flush | python | def sql_flush(self, style, tables, sequences, allow_cascade=False):
for table in tables:
qs = "TRUNCATE {}".format(table)
self.connection.connection.execute(qs)
return [] | Truncate all existing tables in current keyspace.
:returns: an empty list | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/base/operations.py#L36-L47 | null | class CassandraDatabaseOperations(BaseDatabaseOperations):
def pk_default_value(self):
"""
Returns None, to be interpreted by back-ends as a request to
generate a new key for an "inserted" object.
"""
return None
def quote_name(self, name):
"""
Does not do any quoting, as it is not needed for most NoSQL
databases.
"""
return name
def prep_for_like_query(self, value):
"""
Does no conversion, parent string-cast is SQL specific.
"""
return value
def prep_for_iexact_query(self, value):
"""
Does no conversion, parent string-cast is SQL specific.
"""
return value
|
r4fek/django-cassandra-engine | django_cassandra_engine/models/__init__.py | convert_pk_field_names_to_real | python | def convert_pk_field_names_to_real(model, field_names):
pk_field_names = tuple(f.name for f in model._get_primary_key_columns())
def append_field(field_name):
if field_name not in real_field_names:
real_field_names.append(field_name)
real_field_names = []
for name in field_names:
if name == 'pk':
for real_pk_field_name in pk_field_names:
append_field(real_pk_field_name)
elif name == '-pk':
for real_pk_field_name in pk_field_names:
append_field('-' + real_pk_field_name)
else:
append_field(name)
return real_field_names | Convert field names including 'pk' to the real field names:
>>> convert_pk_field_names_to_real(['pk', 'another_field'])
['real_pk_field', 'another_field'] | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L519-L542 | [
"def append_field(field_name):\n if field_name not in real_field_names:\n real_field_names.append(field_name)\n"
] | import logging
import inspect
import copy
import warnings
from operator import attrgetter
import collections
from functools import partial
from itertools import chain
import six
from django.conf import settings
from django.apps import apps
from django.core import validators
from django.db.models.base import ModelBase
from django.utils.translation import ugettext_lazy as _
from django.db.models import options
from ..compat import (BaseModel, ColumnDescriptor, ModelDefinitionException,
ModelException, ModelMetaClass, OrderedDict, columns,
query)
from .constants import ORDER_BY_WARN, ORDER_BY_ERROR_HELP, PK_META_MISSING_HELP
from . import django_field_methods
from . import django_model_methods
log = logging.getLogger(__name__)
_django_manager_attr_names = ('objects', 'default_manager', '_default_manager',
'base_manager', '_base_manager')
class DjangoCassandraOptions(options.Options):
default_field_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
def __init__(self, *args, **kwargs):
self.model_inst = kwargs.pop('cls')
self._defined_columns = self.model_inst._defined_columns
# Add Django attibutes to Columns
self._give_columns_django_field_attributes()
# Call Django to create _meta object
super(DjangoCassandraOptions, self).__init__(*args, **kwargs)
self._private_fields_name = 'private_fields'
if hasattr(self, 'virtual_fields'):
# Django < 1.10
self._private_fields_name = 'virtual_fields'
# Add Columns as Django Fields
for column in six.itervalues(self._defined_columns):
self.add_field(column)
self.setup_pk()
# Set further _meta attributes explicitly
self.proxy_for_model = self.concrete_model = self.model_inst
self.managed = False
self.swappable = False
def can_migrate(self, *args, **kwargs):
return False
def get_all_related_objects_with_model(self, *args, **kwargs):
return []
@property
def related_objects(self):
return []
def setup_pk(self):
self.pk = self.model_inst._get_explicit_pk_column()
def add_field(self, field, **kwargs):
"""Add each field as a private field."""
getattr(self, self._private_fields_name).append(field)
self._expire_cache(reverse=True)
self._expire_cache(reverse=False)
def _get_fields(self, *args, **kwargs):
fields = six.itervalues(self._defined_columns)
return options.make_immutable_fields_list('get_fields()', fields)
def _set_column_django_attributes(self, cql_column, name):
allow_null = (
(not cql_column.required and
not cql_column.is_primary_key and
not cql_column.partition_key) or cql_column.has_default and not cql_column.required
)
cql_column.error_messages = self.default_field_error_messages
cql_column.empty_values = list(validators.EMPTY_VALUES)
cql_column.db_index = cql_column.index
cql_column.serialize = True
cql_column.unique = cql_column.is_primary_key
cql_column.hidden = False
cql_column.auto_created = False
cql_column.help_text = ''
cql_column.blank = allow_null
cql_column.null = allow_null
cql_column.choices = []
cql_column.flatchoices = []
cql_column.validators = []
cql_column.editable = True
cql_column.concrete = True
cql_column.many_to_many = False
cql_column.many_to_one = False
cql_column.one_to_many = False
cql_column.one_to_one = False
cql_column.is_relation = False
cql_column.remote_field = None
cql_column.unique_for_date = None
cql_column.unique_for_month = None
cql_column.unique_for_year = None
cql_column.db_column = None
cql_column.rel = None
cql_column.attname = name
cql_column.field = cql_column
cql_column.model = self.model_inst
cql_column.name = cql_column.db_field_name
cql_column.verbose_name = cql_column.db_field_name
cql_column._verbose_name = cql_column.db_field_name
cql_column.field.related_query_name = lambda: None
def _give_columns_django_field_attributes(self):
"""
Add Django Field attributes to each cqlengine.Column instance.
So that the Django Options class may interact with it as if it were
a Django Field.
"""
methods_to_add = (
django_field_methods.value_from_object,
django_field_methods.value_to_string,
django_field_methods.get_attname,
django_field_methods.get_cache_name,
django_field_methods.pre_save,
django_field_methods.get_prep_value,
django_field_methods.get_choices,
django_field_methods.get_choices_default,
django_field_methods.save_form_data,
django_field_methods.formfield,
django_field_methods.get_db_prep_value,
django_field_methods.get_db_prep_save,
django_field_methods.db_type_suffix,
django_field_methods.select_format,
django_field_methods.get_internal_type,
django_field_methods.get_attname_column,
django_field_methods.check,
django_field_methods._check_field_name,
django_field_methods._check_db_index,
django_field_methods.deconstruct,
django_field_methods.run_validators,
django_field_methods.clean,
django_field_methods.get_db_converters,
django_field_methods.get_prep_lookup,
django_field_methods.get_db_prep_lookup,
django_field_methods.get_filter_kwargs_for_object,
django_field_methods.set_attributes_from_name,
django_field_methods.db_parameters,
django_field_methods.get_pk_value_on_save,
django_field_methods.get_col,
)
for name, cql_column in six.iteritems(self._defined_columns):
self._set_column_django_attributes(cql_column=cql_column, name=name)
for method in methods_to_add:
try:
method_name = method.func_name
except AttributeError:
# python 3
method_name = method.__name__
new_method = six.create_bound_method(method, cql_column)
setattr(cql_column, method_name, new_method)
class DjangoCassandraModelMetaClass(ModelMetaClass, ModelBase):
def __new__(cls, name, bases, attrs):
parents = [b for b in bases if isinstance(b, DjangoCassandraModelMetaClass)]
if not parents:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
for attr in _django_manager_attr_names:
setattr(cls, attr, None)
# ################################################################
# start code taken from python-driver 3.3.0 ModelMetaClass.__new__
# ################################################################
column_dict = OrderedDict()
primary_keys = OrderedDict()
pk_name = None
# get inherited properties
inherited_columns = OrderedDict()
for base in bases:
for k, v in getattr(base, '_defined_columns', {}).items():
inherited_columns.setdefault(k, v)
# short circuit __abstract__ inheritance
is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)
# short circuit __discriminator_value__ inheritance
attrs['__discriminator_value__'] = attrs.get('__discriminator_value__')
# TODO __default__ttl__ should be removed in the next major release
options = attrs.get('__options__') or {}
attrs['__default_ttl__'] = options.get('default_time_to_live')
column_definitions = [(k, v) for k, v in attrs.items() if
isinstance(v, columns.Column)]
column_definitions = sorted(column_definitions,
key=lambda x: x[1].position)
is_polymorphic_base = any(
[c[1].discriminator_column for c in column_definitions])
column_definitions = [x for x in
inherited_columns.items()] + column_definitions
discriminator_columns = [c for c in column_definitions if
c[1].discriminator_column]
is_polymorphic = len(discriminator_columns) > 0
if len(discriminator_columns) > 1:
raise ModelDefinitionException(
'only one discriminator_column can be defined in a model, {0} found'.format(
len(discriminator_columns)))
if attrs['__discriminator_value__'] and not is_polymorphic:
raise ModelDefinitionException(
'__discriminator_value__ specified, but no base columns defined with discriminator_column=True')
discriminator_column_name, discriminator_column = \
discriminator_columns[0] if discriminator_columns else (None, None)
if isinstance(discriminator_column,
(columns.BaseContainerColumn, columns.Counter)):
raise ModelDefinitionException(
'counter and container columns cannot be used as discriminator columns')
# find polymorphic base class
polymorphic_base = None
if is_polymorphic and not is_polymorphic_base:
def _get_polymorphic_base(bases):
for base in bases:
if getattr(base, '_is_polymorphic_base', False):
return base
klass = _get_polymorphic_base(base.__bases__)
if klass:
return klass
polymorphic_base = _get_polymorphic_base(bases)
defined_columns = OrderedDict(column_definitions)
# check for primary key
if not is_abstract and not any(
[v.primary_key for k, v in column_definitions]):
raise ModelDefinitionException(
"At least 1 primary key is required.")
counter_columns = [c for c in defined_columns.values() if
isinstance(c, columns.Counter)]
data_columns = [c for c in defined_columns.values() if
not c.primary_key and not isinstance(c,
columns.Counter)]
if counter_columns and data_columns:
raise ModelDefinitionException(
'counter models may not have data columns')
has_partition_keys = any(
v.partition_key for (k, v) in column_definitions)
def _transform_column(col_name, col_obj):
column_dict[col_name] = col_obj
if col_obj.primary_key:
primary_keys[col_name] = col_obj
col_obj.set_column_name(col_name)
# set properties
attrs[col_name] = ColumnDescriptor(col_obj)
partition_key_index = 0
# transform column definitions
for k, v in column_definitions:
# don't allow a column with the same name as a built-in attribute or method
if k in BaseModel.__dict__:
raise ModelDefinitionException(
"column '{0}' conflicts with built-in attribute/method".format(
k))
# counter column primary keys are not allowed
if (v.primary_key or v.partition_key) and isinstance(v,
columns.Counter):
raise ModelDefinitionException(
'counter columns cannot be used as primary keys')
# this will mark the first primary key column as a partition
# key, if one hasn't been set already
if not has_partition_keys and v.primary_key:
v.partition_key = True
has_partition_keys = True
if v.partition_key:
v._partition_key_index = partition_key_index
partition_key_index += 1
overriding = column_dict.get(k)
if overriding:
v.position = overriding.position
v.partition_key = overriding.partition_key
v._partition_key_index = overriding._partition_key_index
_transform_column(k, v)
partition_keys = OrderedDict(
k for k in primary_keys.items() if k[1].partition_key)
clustering_keys = OrderedDict(
k for k in primary_keys.items() if not k[1].partition_key)
if attrs.get('__compute_routing_key__', True):
key_cols = [c for c in partition_keys.values()]
partition_key_index = dict(
(col.db_field_name, col._partition_key_index) for col in
key_cols)
key_cql_types = [c.cql_type for c in key_cols]
key_serializer = staticmethod(
lambda parts, proto_version: [t.to_binary(p, proto_version) for
t, p in
zip(key_cql_types, parts)])
else:
partition_key_index = {}
key_serializer = staticmethod(lambda parts, proto_version: None)
# setup partition key shortcut
if len(partition_keys) == 0:
if not is_abstract:
raise ModelException(
"at least one partition key must be defined")
if len(partition_keys) == 1:
pk_name = [x for x in partition_keys.keys()][0]
attrs['pk'] = attrs[pk_name]
else:
# composite partition key case, get/set a tuple of values
_get = lambda self: tuple(
self._values[c].getval() for c in partition_keys.keys())
_set = lambda self, val: tuple(
self._values[c].setval(v) for (c, v) in
zip(partition_keys.keys(), val))
attrs['pk'] = property(_get, _set)
# some validation
col_names = set()
for v in column_dict.values():
# check for duplicate column names
if v.db_field_name in col_names:
raise ModelException(
"{0} defines the column '{1}' more than once".format(name,
v.db_field_name))
if v.clustering_order and not (
v.primary_key and not v.partition_key):
raise ModelException(
"clustering_order may be specified only for clustering primary keys")
if v.clustering_order and v.clustering_order.lower() not in (
'asc', 'desc'):
raise ModelException(
"invalid clustering order '{0}' for column '{1}'".format(
repr(v.clustering_order), v.db_field_name))
col_names.add(v.db_field_name)
# create db_name -> model name map for loading
db_map = {}
for col_name, field in column_dict.items():
db_field = field.db_field_name
if db_field != col_name:
db_map[db_field] = col_name
# add management members to the class
attrs['_columns'] = column_dict
attrs['_primary_keys'] = primary_keys
attrs['_defined_columns'] = defined_columns
# maps the database field to the models key
attrs['_db_map'] = db_map
attrs['_pk_name'] = pk_name
attrs['_dynamic_columns'] = {}
attrs['_partition_keys'] = partition_keys
attrs['_partition_key_index'] = partition_key_index
attrs['_key_serializer'] = key_serializer
attrs['_clustering_keys'] = clustering_keys
attrs['_has_counter'] = len(counter_columns) > 0
# add polymorphic management attributes
attrs['_is_polymorphic_base'] = is_polymorphic_base
attrs['_is_polymorphic'] = is_polymorphic
attrs['_polymorphic_base'] = polymorphic_base
attrs['_discriminator_column'] = discriminator_column
attrs['_discriminator_column_name'] = discriminator_column_name
attrs['_discriminator_map'] = {} if is_polymorphic_base else None
# setup class exceptions
DoesNotExistBase = None
for base in bases:
DoesNotExistBase = getattr(base, 'DoesNotExist', None)
if DoesNotExistBase is not None:
break
DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist',
BaseModel.DoesNotExist)
attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), {})
MultipleObjectsReturnedBase = None
for base in bases:
MultipleObjectsReturnedBase = getattr(base,
'MultipleObjectsReturned',
None)
if MultipleObjectsReturnedBase is not None:
break
MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop(
'MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned',
(MultipleObjectsReturnedBase,),
{})
# create the class and add a QuerySet to it
klass = super(ModelBase, cls).__new__(cls, name, bases, attrs)
udts = []
for col in column_dict.values():
columns.resolve_udts(col, udts)
# for user_type in set(udts):
# user_type.register_for_keyspace(klass._get_keyspace())
# ################################################################
# end code taken from python-driver 3.3.0 ModelMetaClass.__new__
# ################################################################
klass._deferred = False
if not is_abstract:
klass = cls._add_django_meta_and_register_model(
klass=klass,
attrs=attrs,
name=name
)
return klass
def add_to_class(cls, name, value):
django_meta_default_names = options.DEFAULT_NAMES
# patch django so Meta.get_pk_field can be specified these models
options.DEFAULT_NAMES = django_meta_default_names + ('get_pk_field',)
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
try:
setattr(cls, name, value)
except AttributeError:
raise AttributeError('failed to set attribute {}'.format(name))
options.DEFAULT_NAMES = django_meta_default_names
@classmethod
def _add_django_meta_and_register_model(cls, klass, attrs, name):
# Create the class.
module = attrs.get('__module__')
if not module:
return klass
new_class = klass
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
if meta:
meta.managed = False
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
# Add _meta/Options attribute to the model
new_class.add_to_class(
'_meta', DjangoCassandraOptions(meta, app_label, cls=new_class))
# Add manager to the model
for manager_attr in _django_manager_attr_names:
new_class.add_to_class(manager_attr, new_class.objects)
# Register the model
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
@classmethod
def check(cls, **kwargs):
errors = []
return errors
class ReadOnlyDjangoCassandraQuerySet(list):
name = 'objects'
use_in_migrations = False
def __init__(self, data, model_class):
if not isinstance(data, collections.Iterable):
raise TypeError(
'ReadOnlyDjangoCassandraQuerySet requires iterable data')
super(ReadOnlyDjangoCassandraQuerySet, self).__init__(data)
self.model = model_class
self.query = StubQuery(model=self.model)
@property
def objects(self):
return self
def first(self):
return next(iter(self), None)
def _clone(self):
return copy.deepcopy(self)
def all(self):
return self
def get_queryset(self):
return self
def count(self):
return len(self)
def exists(self):
return len(self) > 0
def values_list(self, *fields, **kwargs):
fields = convert_pk_field_names_to_real(model=self.model,
field_names=fields)
values_list = []
for model_record in self:
values_list_item = []
for field_name in fields:
values_list_item.append(model_record[field_name])
values_list.append(values_list_item)
if kwargs.get('flat') is True:
values_list = list(chain.from_iterable(values_list))
return values_list
def _raise_not_implemented(self, method_name):
raise NotImplementedError(
'You cannot .{}() on a DjangoCassandraQuerySet which '
'has been ordered using python'.format(method_name)
)
def filter(self, **kwargs):
self._raise_not_implemented(method_name='filter')
def get(self, **kwargs):
self._raise_not_implemented(method_name='get')
def distinct(self, *args, **kwargs):
self._raise_not_implemented(method_name='distinct')
def limit(self, *args, **kwargs):
self._raise_not_implemented(method_name='limit')
def only(self, *args, **kwargs):
self._raise_not_implemented(method_name='only')
def create(self, *args, **kwargs):
self._raise_not_implemented(method_name='create')
def delete(self, *args, **kwargs):
self._raise_not_implemented(method_name='delete')
def defer(self, *args, **kwargs):
self._raise_not_implemented(method_name='defer')
def exclude(self, *args, **kwargs):
self._raise_not_implemented(method_name='defer')
class StubQuery(object):
def __init__(self, model):
self.model = model
self.order_by = ['pk']
@property
def select_related(self):
return False
def add_context(self, *args, **kwargs):
pass
def get_context(self, *args, **kwargs):
return {}
def get_meta(self):
return self.model._meta
def _prepare(self, field):
return self
class DjangoCassandraQuerySet(query.ModelQuerySet):
name = 'objects'
use_in_migrations = False
def __init__(self, *args, **kwargs):
super(query.ModelQuerySet, self).__init__(*args, **kwargs)
self._allow_filtering = True
self.query = StubQuery(model=self.model)
def _select_fields(self):
if self._defer_fields or self._only_fields:
fields = self.model._columns.keys()
if self._defer_fields:
fields = [f for f in fields if f not in self._defer_fields]
elif self._only_fields:
fields = self._only_fields
return [self.model._columns[f].db_field_name for f in fields]
return super(query.ModelQuerySet, self)._select_fields()
def count(self):
if self._count is None:
self._count = super(query.ModelQuerySet, self).count()
return self._count
def get_queryset(self):
if len(self._where) > 0:
return super(query.ModelQuerySet, self).filter()
else:
return super(query.ModelQuerySet, self).all()
def exclude(self, *args, **kwargs):
new_queryset = []
for model in self.get_queryset():
should_exclude_model = False
for field_name, field_value in six.iteritems(kwargs):
if getattr(model, field_name) == field_value:
should_exclude_model = True
break
if not should_exclude_model:
new_queryset.append(model)
return ReadOnlyDjangoCassandraQuerySet(
new_queryset, model_class=self.model)
def python_order_by(self, qset, colnames):
if not isinstance(qset, list):
raise TypeError('qset must be a list')
colnames = convert_pk_field_names_to_real(model=self.model,
field_names=colnames)
any_cols_revesed = any(c.startswith('-') for c in colnames)
if any_cols_revesed:
for col in colnames:
should_reverse = col.startswith('-')
if should_reverse:
col = col[1:]
qset.sort(key=attrgetter(col), reverse=should_reverse)
else:
new_colnames = []
for col in colnames:
if col == 'pk':
pk_cols = self.model._get_primary_key_column_names()
for pk_name in pk_cols:
new_colnames.append(pk_name)
else:
new_colnames.append(col)
try:
qset.sort(key=attrgetter(*new_colnames))
except AttributeError:
msg = 'Can\'t resolve one of column names: {}'.format(
*new_colnames)
raise query.QueryException(msg)
return ReadOnlyDjangoCassandraQuerySet(qset, model_class=self.model)
def exists(self):
return self.count() > 0
def get(self, *args, **kwargs):
obj = super(DjangoCassandraQuerySet, self).get(*args, **kwargs)
obj.pk = getattr(obj, obj._get_explicit_pk_column().name)
return obj
def order_by(self, *colnames):
if len(colnames) == 0:
clone = copy.deepcopy(self)
clone._order = []
return clone
order_using_python = False
conditions = []
for col in colnames:
try:
conditions.append('"{0}" {1}'.format(
*self._get_ordering_condition(col))
)
except query.QueryException as exc:
order_by_exception = (
'Can\'t order' in str(exc) or
'Can\'t resolve the column name' in str(exc)
)
if order_by_exception:
order_using_python = settings.CASSANDRA_FALLBACK_ORDER_BY_PYTHON
if order_using_python:
log.debug('ordering in python column "%s"', col)
msg = ORDER_BY_WARN.format(col=col, exc=exc)
warnings.warn(msg)
else:
raise query.QueryException(
'{exc}\n\n'
'{help}'.format(exc=exc, help=ORDER_BY_ERROR_HELP))
else:
raise exc
clone = copy.deepcopy(self)
if order_using_python is True:
return self.python_order_by(qset=list(clone), colnames=colnames)
else:
clone._order.extend(conditions)
return clone
def values_list(self, *fields, **kwargs):
if 'pk' in fields:
fields = convert_pk_field_names_to_real(
model=self.model, field_names=fields)
super_values_list = super(DjangoCassandraQuerySet, self).values_list
return super_values_list(*fields, **kwargs)
def _clone(self):
return copy.deepcopy(self)
class DjangoCassandraModel(
six.with_metaclass(DjangoCassandraModelMetaClass, BaseModel)
):
__queryset__ = DjangoCassandraQuerySet
__abstract__ = True
__table_name__ = None
__table_name_case_sensitive__ = False
__keyspace__ = None
__options__ = None
__discriminator_value__ = None
__compute_routing_key__ = True
def __init__(self, *args, **kwargs):
super(DjangoCassandraModel, self).__init__(*args, **kwargs)
methods = inspect.getmembers(django_model_methods, inspect.isfunction)
for method_name, method in methods:
new_method = partial(method, self)
setattr(self, method_name, new_method)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
@classmethod
def get(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'get\'')
@classmethod
def filter(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'filter\'')
@classmethod
def all(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'all\'')
@classmethod
def _get_primary_key_columns(cls):
return tuple(c for c in six.itervalues(cls._columns)
if c.is_primary_key is True)
@classmethod
def _get_primary_key_column_names(cls):
return tuple(c.name for c in cls._get_primary_key_columns())
@classmethod
def _get_column(cls, name):
"""
Based on cqlengine.models.BaseModel._get_column.
But to work with 'pk'
"""
if name == 'pk':
return cls._meta.get_field(cls._meta.pk.name)
return cls._columns[name]
@classmethod
def _get_explicit_pk_column(cls):
try:
if len(cls._primary_keys) > 1:
try:
pk_field = cls.Meta.get_pk_field
except AttributeError:
raise RuntimeError(PK_META_MISSING_HELP.format(cls))
return cls._primary_keys[pk_field]
else:
return list(six.itervalues(cls._primary_keys))[0]
except IndexError:
return None
|
r4fek/django-cassandra-engine | django_cassandra_engine/models/__init__.py | DjangoCassandraOptions.add_field | python | def add_field(self, field, **kwargs):
getattr(self, self._private_fields_name).append(field)
self._expire_cache(reverse=True)
self._expire_cache(reverse=False) | Add each field as a private field. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L80-L84 | null | class DjangoCassandraOptions(options.Options):
default_field_error_messages = {
'invalid_choice': _('Value %(value)r is not a valid choice.'),
'null': _('This field cannot be null.'),
'blank': _('This field cannot be blank.'),
'unique': _('%(model_name)s with this %(field_label)s '
'already exists.'),
'unique_for_date': _("%(field_label)s must be unique for "
"%(date_field_label)s %(lookup_type)s."),
}
def __init__(self, *args, **kwargs):
self.model_inst = kwargs.pop('cls')
self._defined_columns = self.model_inst._defined_columns
# Add Django attibutes to Columns
self._give_columns_django_field_attributes()
# Call Django to create _meta object
super(DjangoCassandraOptions, self).__init__(*args, **kwargs)
self._private_fields_name = 'private_fields'
if hasattr(self, 'virtual_fields'):
# Django < 1.10
self._private_fields_name = 'virtual_fields'
# Add Columns as Django Fields
for column in six.itervalues(self._defined_columns):
self.add_field(column)
self.setup_pk()
# Set further _meta attributes explicitly
self.proxy_for_model = self.concrete_model = self.model_inst
self.managed = False
self.swappable = False
def can_migrate(self, *args, **kwargs):
return False
def get_all_related_objects_with_model(self, *args, **kwargs):
return []
@property
def related_objects(self):
return []
def setup_pk(self):
self.pk = self.model_inst._get_explicit_pk_column()
def _get_fields(self, *args, **kwargs):
fields = six.itervalues(self._defined_columns)
return options.make_immutable_fields_list('get_fields()', fields)
def _set_column_django_attributes(self, cql_column, name):
allow_null = (
(not cql_column.required and
not cql_column.is_primary_key and
not cql_column.partition_key) or cql_column.has_default and not cql_column.required
)
cql_column.error_messages = self.default_field_error_messages
cql_column.empty_values = list(validators.EMPTY_VALUES)
cql_column.db_index = cql_column.index
cql_column.serialize = True
cql_column.unique = cql_column.is_primary_key
cql_column.hidden = False
cql_column.auto_created = False
cql_column.help_text = ''
cql_column.blank = allow_null
cql_column.null = allow_null
cql_column.choices = []
cql_column.flatchoices = []
cql_column.validators = []
cql_column.editable = True
cql_column.concrete = True
cql_column.many_to_many = False
cql_column.many_to_one = False
cql_column.one_to_many = False
cql_column.one_to_one = False
cql_column.is_relation = False
cql_column.remote_field = None
cql_column.unique_for_date = None
cql_column.unique_for_month = None
cql_column.unique_for_year = None
cql_column.db_column = None
cql_column.rel = None
cql_column.attname = name
cql_column.field = cql_column
cql_column.model = self.model_inst
cql_column.name = cql_column.db_field_name
cql_column.verbose_name = cql_column.db_field_name
cql_column._verbose_name = cql_column.db_field_name
cql_column.field.related_query_name = lambda: None
def _give_columns_django_field_attributes(self):
"""
Add Django Field attributes to each cqlengine.Column instance.
So that the Django Options class may interact with it as if it were
a Django Field.
"""
methods_to_add = (
django_field_methods.value_from_object,
django_field_methods.value_to_string,
django_field_methods.get_attname,
django_field_methods.get_cache_name,
django_field_methods.pre_save,
django_field_methods.get_prep_value,
django_field_methods.get_choices,
django_field_methods.get_choices_default,
django_field_methods.save_form_data,
django_field_methods.formfield,
django_field_methods.get_db_prep_value,
django_field_methods.get_db_prep_save,
django_field_methods.db_type_suffix,
django_field_methods.select_format,
django_field_methods.get_internal_type,
django_field_methods.get_attname_column,
django_field_methods.check,
django_field_methods._check_field_name,
django_field_methods._check_db_index,
django_field_methods.deconstruct,
django_field_methods.run_validators,
django_field_methods.clean,
django_field_methods.get_db_converters,
django_field_methods.get_prep_lookup,
django_field_methods.get_db_prep_lookup,
django_field_methods.get_filter_kwargs_for_object,
django_field_methods.set_attributes_from_name,
django_field_methods.db_parameters,
django_field_methods.get_pk_value_on_save,
django_field_methods.get_col,
)
for name, cql_column in six.iteritems(self._defined_columns):
self._set_column_django_attributes(cql_column=cql_column, name=name)
for method in methods_to_add:
try:
method_name = method.func_name
except AttributeError:
# python 3
method_name = method.__name__
new_method = six.create_bound_method(method, cql_column)
setattr(cql_column, method_name, new_method)
|
r4fek/django-cassandra-engine | django_cassandra_engine/models/__init__.py | DjangoCassandraOptions._give_columns_django_field_attributes | python | def _give_columns_django_field_attributes(self):
methods_to_add = (
django_field_methods.value_from_object,
django_field_methods.value_to_string,
django_field_methods.get_attname,
django_field_methods.get_cache_name,
django_field_methods.pre_save,
django_field_methods.get_prep_value,
django_field_methods.get_choices,
django_field_methods.get_choices_default,
django_field_methods.save_form_data,
django_field_methods.formfield,
django_field_methods.get_db_prep_value,
django_field_methods.get_db_prep_save,
django_field_methods.db_type_suffix,
django_field_methods.select_format,
django_field_methods.get_internal_type,
django_field_methods.get_attname_column,
django_field_methods.check,
django_field_methods._check_field_name,
django_field_methods._check_db_index,
django_field_methods.deconstruct,
django_field_methods.run_validators,
django_field_methods.clean,
django_field_methods.get_db_converters,
django_field_methods.get_prep_lookup,
django_field_methods.get_db_prep_lookup,
django_field_methods.get_filter_kwargs_for_object,
django_field_methods.set_attributes_from_name,
django_field_methods.db_parameters,
django_field_methods.get_pk_value_on_save,
django_field_methods.get_col,
)
for name, cql_column in six.iteritems(self._defined_columns):
self._set_column_django_attributes(cql_column=cql_column, name=name)
for method in methods_to_add:
try:
method_name = method.func_name
except AttributeError:
# python 3
method_name = method.__name__
new_method = six.create_bound_method(method, cql_column)
setattr(cql_column, method_name, new_method) | Add Django Field attributes to each cqlengine.Column instance.
So that the Django Options class may interact with it as if it were
a Django Field. | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L130-L179 | [
"def _set_column_django_attributes(self, cql_column, name):\n allow_null = (\n (not cql_column.required and\n not cql_column.is_primary_key and\n not cql_column.partition_key) or cql_column.has_default and not cql_column.required\n )\n cql_column.error_messages = self.default_field_error_messages\n cql_column.empty_values = list(validators.EMPTY_VALUES)\n cql_column.db_index = cql_column.index\n cql_column.serialize = True\n cql_column.unique = cql_column.is_primary_key\n cql_column.hidden = False\n cql_column.auto_created = False\n cql_column.help_text = ''\n cql_column.blank = allow_null\n cql_column.null = allow_null\n cql_column.choices = []\n cql_column.flatchoices = []\n cql_column.validators = []\n cql_column.editable = True\n cql_column.concrete = True\n cql_column.many_to_many = False\n cql_column.many_to_one = False\n cql_column.one_to_many = False\n cql_column.one_to_one = False\n cql_column.is_relation = False\n cql_column.remote_field = None\n cql_column.unique_for_date = None\n cql_column.unique_for_month = None\n cql_column.unique_for_year = None\n cql_column.db_column = None\n cql_column.rel = None\n cql_column.attname = name\n cql_column.field = cql_column\n cql_column.model = self.model_inst\n cql_column.name = cql_column.db_field_name\n cql_column.verbose_name = cql_column.db_field_name\n cql_column._verbose_name = cql_column.db_field_name\n cql_column.field.related_query_name = lambda: None\n"
class DjangoCassandraOptions(options.Options):
    """Django ``Model._meta`` replacement backed by cqlengine columns.

    Adapts a cqlengine model's column definitions so Django machinery
    (admin, forms, serializers) can introspect them as if they were
    regular Django model fields.
    """

    # Validation messages copied onto every column so it quacks like a
    # Django field during form/model validation.
    default_field_error_messages = {
        'invalid_choice': _('Value %(value)r is not a valid choice.'),
        'null': _('This field cannot be null.'),
        'blank': _('This field cannot be blank.'),
        'unique': _('%(model_name)s with this %(field_label)s '
                    'already exists.'),
        'unique_for_date': _("%(field_label)s must be unique for "
                             "%(date_field_label)s %(lookup_type)s."),
    }

    def __init__(self, *args, **kwargs):
        # The model class is passed in via the 'cls' kwarg (presumably by
        # the model metaclass -- confirm against caller).
        self.model_inst = kwargs.pop('cls')
        self._defined_columns = self.model_inst._defined_columns
        # Add Django attributes to Columns
        self._give_columns_django_field_attributes()
        # Call Django to create _meta object
        super(DjangoCassandraOptions, self).__init__(*args, **kwargs)
        self._private_fields_name = 'private_fields'
        if hasattr(self, 'virtual_fields'):
            # Django < 1.10
            self._private_fields_name = 'virtual_fields'
        # Add Columns as Django Fields
        for column in six.itervalues(self._defined_columns):
            self.add_field(column)
        self.setup_pk()
        # Set further _meta attributes explicitly
        self.proxy_for_model = self.concrete_model = self.model_inst
        self.managed = False
        self.swappable = False

    def can_migrate(self, *args, **kwargs):
        # Cassandra tables are never managed by Django migrations.
        return False

    def get_all_related_objects_with_model(self, *args, **kwargs):
        # Relations are not supported; report none.
        return []

    @property
    def related_objects(self):
        # Relations are not supported; report none.
        return []

    def setup_pk(self):
        # Resolve which column Django should treat as the primary key.
        self.pk = self.model_inst._get_explicit_pk_column()

    def add_field(self, field, **kwargs):
        """Add each field as a private field."""
        getattr(self, self._private_fields_name).append(field)
        # Invalidate Django's cached field lists in both directions.
        self._expire_cache(reverse=True)
        self._expire_cache(reverse=False)

    def _get_fields(self, *args, **kwargs):
        # Bypass Django's relational field machinery entirely and expose
        # the cqlengine columns directly.
        fields = six.itervalues(self._defined_columns)
        return options.make_immutable_fields_list('get_fields()', fields)

    def _set_column_django_attributes(self, cql_column, name):
        # Decorate a single cqlengine column with the attribute surface
        # Django expects from a model field.
        allow_null = (
            (not cql_column.required and
             not cql_column.is_primary_key and
             not cql_column.partition_key) or cql_column.has_default and not cql_column.required
        )
        cql_column.error_messages = self.default_field_error_messages
        cql_column.empty_values = list(validators.EMPTY_VALUES)
        cql_column.db_index = cql_column.index
        cql_column.serialize = True
        cql_column.unique = cql_column.is_primary_key
        cql_column.hidden = False
        cql_column.auto_created = False
        cql_column.help_text = ''
        cql_column.blank = allow_null
        cql_column.null = allow_null
        cql_column.choices = []
        cql_column.flatchoices = []
        cql_column.validators = []
        cql_column.editable = True
        cql_column.concrete = True
        # No relational behavior for Cassandra columns.
        cql_column.many_to_many = False
        cql_column.many_to_one = False
        cql_column.one_to_many = False
        cql_column.one_to_one = False
        cql_column.is_relation = False
        cql_column.remote_field = None
        cql_column.unique_for_date = None
        cql_column.unique_for_month = None
        cql_column.unique_for_year = None
        cql_column.db_column = None
        cql_column.rel = None
        cql_column.attname = name
        cql_column.field = cql_column
        cql_column.model = self.model_inst
        cql_column.name = cql_column.db_field_name
        cql_column.verbose_name = cql_column.db_field_name
        cql_column._verbose_name = cql_column.db_field_name
        cql_column.field.related_query_name = lambda: None
|
r4fek/django-cassandra-engine | django_cassandra_engine/models/__init__.py | DjangoCassandraModel._get_column | python | def _get_column(cls, name):
if name == 'pk':
return cls._meta.get_field(cls._meta.pk.name)
return cls._columns[name] | Based on cqlengine.models.BaseModel._get_column.
But to work with 'pk' | train | https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L833-L841 | null | class DjangoCassandraModel(
six.with_metaclass(DjangoCassandraModelMetaClass, BaseModel)
):
__queryset__ = DjangoCassandraQuerySet
__abstract__ = True
__table_name__ = None
__table_name_case_sensitive__ = False
__keyspace__ = None
__options__ = None
__discriminator_value__ = None
__compute_routing_key__ = True
def __init__(self, *args, **kwargs):
super(DjangoCassandraModel, self).__init__(*args, **kwargs)
methods = inspect.getmembers(django_model_methods, inspect.isfunction)
for method_name, method in methods:
new_method = partial(method, self)
setattr(self, method_name, new_method)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
@classmethod
def get(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'get\'')
@classmethod
def filter(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'filter\'')
@classmethod
def all(cls, *args, **kwargs):
raise AttributeError('model has no attribute \'all\'')
@classmethod
def _get_primary_key_columns(cls):
return tuple(c for c in six.itervalues(cls._columns)
if c.is_primary_key is True)
@classmethod
def _get_primary_key_column_names(cls):
return tuple(c.name for c in cls._get_primary_key_columns())
@classmethod
@classmethod
def _get_explicit_pk_column(cls):
try:
if len(cls._primary_keys) > 1:
try:
pk_field = cls.Meta.get_pk_field
except AttributeError:
raise RuntimeError(PK_META_MISSING_HELP.format(cls))
return cls._primary_keys[pk_field]
else:
return list(six.itervalues(cls._primary_keys))[0]
except IndexError:
return None
|
def get(self, remote, local=None, preserve_mode=True):
    """
    Download a file from the current connection to the local filesystem.

    :param str remote:
        Remote file to download. May be absolute, or relative to the
        remote working directory. (Most SFTP servers do not expand
        tildes, so prefer paths relative to the login home directory.)

    :param local:
        Local path to store the downloaded file in, or a file-like
        object. If ``None``/empty (the default), the remote basename is
        used inside the current working directory. If a string, it is
        handled like common Unix tooling handles paths. If a file-like
        object, the remote file's contents are written into it.

    :param bool preserve_mode:
        Whether to `os.chmod` the local file so it matches the remote
        file's mode (default: ``True``). Skipped when ``local`` is a
        file-like object, since there is no local path to chmod.

    :returns: A `.Result` object.

    .. versionadded:: 2.0
    """
    if not remote:
        raise ValueError("Remote path must not be empty!")
    orig_remote = remote
    # Anchor relative remote paths to the SFTP session's working directory.
    remote = posixpath.join(
        self.sftp.getcwd() or self.sftp.normalize("."), remote
    )
    # Massage local target: detect file-like objects, default empty paths
    # to the remote basename, and absolutize real filesystem paths.
    orig_local = local
    is_file_like = hasattr(local, "write") and callable(local.write)
    if not local:
        local = posixpath.basename(remote)
    if not is_file_like:
        local = os.path.abspath(local)
    # File-like targets get streamed into directly; paths go via .get().
    if is_file_like:
        self.sftp.getfo(remotepath=remote, fl=local)
    else:
        self.sftp.get(remotepath=remote, localpath=local)
    # Mirror the remote permission bits onto the local copy. BUGFIX: this
    # must be skipped for file-like targets -- os.chmod() requires a
    # filesystem path and would raise TypeError when handed a stream.
    if preserve_mode and not is_file_like:
        remote_mode = self.sftp.stat(remote).st_mode
        mode = stat.S_IMODE(remote_mode)
        os.chmod(local, mode)
    return Result(
        orig_remote=orig_remote,
        remote=remote,
        orig_local=orig_local,
        local=local,
        connection=self.connection,
    )
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/transfer.py#L41-L145 | null | class Transfer(object):
"""
`.Connection`-wrapping class responsible for managing file upload/download.
.. versionadded:: 2.0
"""
# TODO: SFTP clear default, but how to do SCP? subclass? init kwarg?
def __init__(self, connection):
self.connection = connection
@property
def sftp(self):
return self.connection.sftp()
def is_remote_dir(self, path):
try:
return stat.S_ISDIR(self.sftp.stat(path).st_mode)
except IOError:
return False
def put(self, local, remote=None, preserve_mode=True):
"""
Upload a file from the local filesystem to the current connection.
:param local:
Local path of file to upload, or a file-like object.
**If a string is given**, it should be a path to a local (regular)
file (not a directory).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, trying to
upload a nonexistent ``local`` path will typically result in an
`OSError`.
**If a file-like object is given**, its contents are written to the
remote file path.
:param str remote:
Remote path to which the local file will be written.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``put("archive.tgz",
"~/tmp/")``, say ``put("archive.tgz", "tmp/")``.
In addition, this means that 'falsey'/empty values (such as the
default value, ``None``) are allowed and result in uploading to
the remote home directory.
.. note::
When ``local`` is a file-like object, ``remote`` is required
and must refer to a valid file path (not a directory).
:param bool preserve_mode:
Whether to ``chmod`` the remote file so it matches the local file's
mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0
"""
if not local:
raise ValueError("Local path must not be empty!")
is_file_like = hasattr(local, "write") and callable(local.write)
# Massage remote path
orig_remote = remote
if is_file_like:
local_base = getattr(local, "name", None)
else:
local_base = os.path.basename(local)
if not remote:
if is_file_like:
raise ValueError(
"Must give non-empty remote path when local is a file-like object!" # noqa
)
else:
remote = local_base
debug("Massaged empty remote path into {!r}".format(remote))
elif self.is_remote_dir(remote):
# non-empty local_base implies a) text file path or b) FLO which
# had a non-empty .name attribute. huzzah!
if local_base:
remote = posixpath.join(remote, local_base)
else:
if is_file_like:
raise ValueError(
"Can't put a file-like-object into a directory unless it has a non-empty .name attribute!" # noqa
)
else:
# TODO: can we ever really end up here? implies we want to
# reorganize all this logic so it has fewer potential holes
raise ValueError(
"Somehow got an empty local file basename ({!r}) when uploading to a directory ({!r})!".format( # noqa
local_base, remote
)
)
prejoined_remote = remote
remote = posixpath.join(
self.sftp.getcwd() or self.sftp.normalize("."), remote
)
if remote != prejoined_remote:
msg = "Massaged relative remote path {!r} into {!r}"
debug(msg.format(prejoined_remote, remote))
# Massage local path
orig_local = local
if not is_file_like:
local = os.path.abspath(local)
if local != orig_local:
debug(
"Massaged relative local path {!r} into {!r}".format(
orig_local, local
)
) # noqa
# Run Paramiko-level .put() (side-effects only. womp.)
# TODO: push some of the path handling into Paramiko; it should be
# responsible for dealing with path cleaning etc.
# TODO: probably preserve warning message from v1 when overwriting
# existing files. Use logging for that obviously.
#
# If local appears to be a file-like object, use sftp.putfo, not put
if is_file_like:
msg = "Uploading file-like object {!r} to {!r}"
debug(msg.format(local, remote))
pointer = local.tell()
try:
local.seek(0)
self.sftp.putfo(fl=local, remotepath=remote)
finally:
local.seek(pointer)
else:
debug("Uploading {!r} to {!r}".format(local, remote))
self.sftp.put(localpath=local, remotepath=remote)
# Set mode to same as local end
# TODO: Push this down into SFTPClient sometime (requires backwards
# incompat release.)
if preserve_mode:
local_mode = os.stat(local).st_mode
mode = stat.S_IMODE(local_mode)
self.sftp.chmod(remote, mode)
# Return something useful
return Result(
orig_remote=orig_remote,
remote=remote,
orig_local=orig_local,
local=local,
connection=self.connection,
)
|
def put(self, local, remote=None, preserve_mode=True):
    """
    Upload a file from the local filesystem to the current connection.

    :param local:
        Local path of the file to upload, or a file-like object. Strings
        must point at a local (regular) file, not a directory. File-like
        objects have their contents written to the remote file path.

    :param str remote:
        Remote path to which the local file will be written. Empty/``None``
        values upload into the remote home directory. Required (and must
        be a file path, not a directory) when ``local`` is a file-like
        object with no usable ``.name``.

    :param bool preserve_mode:
        Whether to ``chmod`` the remote file so it matches the local
        file's mode (default: ``True``). Skipped when ``local`` is a
        file-like object, since there is no local path to stat.

    :returns: A `.Result` object.

    .. versionadded:: 2.0
    """
    if not local:
        raise ValueError("Local path must not be empty!")
    is_file_like = hasattr(local, "write") and callable(local.write)
    # Massage remote path
    orig_remote = remote
    # Derive a basename for directory targets: stream objects may carry
    # one via .name, real paths always do.
    if is_file_like:
        local_base = getattr(local, "name", None)
    else:
        local_base = os.path.basename(local)
    if not remote:
        if is_file_like:
            raise ValueError(
                "Must give non-empty remote path when local is a file-like object!"  # noqa
            )
        else:
            remote = local_base
            debug("Massaged empty remote path into {!r}".format(remote))
    elif self.is_remote_dir(remote):
        # Directory target: append the local basename, when we have one.
        if local_base:
            remote = posixpath.join(remote, local_base)
        else:
            if is_file_like:
                raise ValueError(
                    "Can't put a file-like-object into a directory unless it has a non-empty .name attribute!"  # noqa
                )
            else:
                # TODO: can we ever really end up here? implies we want to
                # reorganize all this logic so it has fewer potential holes
                raise ValueError(
                    "Somehow got an empty local file basename ({!r}) when uploading to a directory ({!r})!".format(  # noqa
                        local_base, remote
                    )
                )
    # Anchor relative remote paths to the SFTP working directory.
    prejoined_remote = remote
    remote = posixpath.join(
        self.sftp.getcwd() or self.sftp.normalize("."), remote
    )
    if remote != prejoined_remote:
        msg = "Massaged relative remote path {!r} into {!r}"
        debug(msg.format(prejoined_remote, remote))
    # Massage local path (file-like objects are used as-is).
    orig_local = local
    if not is_file_like:
        local = os.path.abspath(local)
        if local != orig_local:
            debug(
                "Massaged relative local path {!r} into {!r}".format(
                    orig_local, local
                )
            )  # noqa
    # If local appears to be a file-like object, use sftp.putfo, not put.
    if is_file_like:
        msg = "Uploading file-like object {!r} to {!r}"
        debug(msg.format(local, remote))
        # Rewind before streaming, then restore the caller's position.
        pointer = local.tell()
        try:
            local.seek(0)
            self.sftp.putfo(fl=local, remotepath=remote)
        finally:
            local.seek(pointer)
    else:
        debug("Uploading {!r} to {!r}".format(local, remote))
        self.sftp.put(localpath=local, remotepath=remote)
    # Mirror local permission bits onto the remote copy. BUGFIX: this must
    # be skipped for file-like sources -- os.stat() requires a filesystem
    # path and would raise TypeError when handed a stream.
    if preserve_mode and not is_file_like:
        local_mode = os.stat(local).st_mode
        mode = stat.S_IMODE(local_mode)
        self.sftp.chmod(remote, mode)
    # Return something useful
    return Result(
        orig_remote=orig_remote,
        remote=remote,
        orig_local=orig_local,
        local=local,
        connection=self.connection,
    )
:param local:
Local path of file to upload, or a file-like object.
**If a string is given**, it should be a path to a local (regular)
file (not a directory).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, trying to
upload a nonexistent ``local`` path will typically result in an
`OSError`.
**If a file-like object is given**, its contents are written to the
remote file path.
:param str remote:
Remote path to which the local file will be written.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``put("archive.tgz",
"~/tmp/")``, say ``put("archive.tgz", "tmp/")``.
In addition, this means that 'falsey'/empty values (such as the
default value, ``None``) are allowed and result in uploading to
the remote home directory.
.. note::
When ``local`` is a file-like object, ``remote`` is required
and must refer to a valid file path (not a directory).
:param bool preserve_mode:
Whether to ``chmod`` the remote file so it matches the local file's
mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/transfer.py#L147-L283 | null | class Transfer(object):
"""
`.Connection`-wrapping class responsible for managing file upload/download.
.. versionadded:: 2.0
"""
# TODO: SFTP clear default, but how to do SCP? subclass? init kwarg?
def __init__(self, connection):
self.connection = connection
@property
def sftp(self):
return self.connection.sftp()
def is_remote_dir(self, path):
try:
return stat.S_ISDIR(self.sftp.stat(path).st_mode)
except IOError:
return False
def get(self, remote, local=None, preserve_mode=True):
"""
Download a file from the current connection to the local filesystem.
:param str remote:
Remote file to download.
May be absolute, or relative to the remote working directory.
.. note::
Most SFTP servers set the remote working directory to the
connecting user's home directory, and (unlike most shells) do
*not* expand tildes (``~``).
For example, instead of saying ``get("~/tmp/archive.tgz")``,
say ``get("tmp/archive.tgz")``.
:param local:
Local path to store downloaded file in, or a file-like object.
**If None or another 'falsey'/empty value is given** (the default),
the remote file is downloaded to the current working directory (as
seen by `os.getcwd`) using its remote filename.
**If a string is given**, it should be a path to a local directory
or file and is subject to similar behavior as that seen by common
Unix utilities or OpenSSH's ``sftp`` or ``scp`` tools.
For example, if the local path is a directory, the remote path's
base filename will be added onto it (so ``get('foo/bar/file.txt',
'/tmp/')`` would result in creation or overwriting of
``/tmp/file.txt``).
.. note::
When dealing with nonexistent file paths, normal Python file
handling concerns come into play - for example, a ``local``
path containing non-leaf directories which do not exist, will
typically result in an `OSError`.
**If a file-like object is given**, the contents of the remote file
are simply written into it.
:param bool preserve_mode:
Whether to `os.chmod` the local file so it matches the remote
file's mode (default: ``True``).
:returns: A `.Result` object.
.. versionadded:: 2.0
"""
# TODO: how does this API change if we want to implement
# remote-to-remote file transfer? (Is that even realistic?)
# TODO: handle v1's string interpolation bits, especially the default
# one, or at least think about how that would work re: split between
# single and multiple server targets.
# TODO: callback support
# TODO: how best to allow changing the behavior/semantics of
# remote/local (e.g. users might want 'safer' behavior that complains
# instead of overwriting existing files) - this likely ties into the
# "how to handle recursive/rsync" and "how to handle scp" questions
# Massage remote path
if not remote:
raise ValueError("Remote path must not be empty!")
orig_remote = remote
remote = posixpath.join(
self.sftp.getcwd() or self.sftp.normalize("."), remote
)
# Massage local path:
# - handle file-ness
# - if path, fill with remote name if empty, & make absolute
orig_local = local
is_file_like = hasattr(local, "write") and callable(local.write)
if not local:
local = posixpath.basename(remote)
if not is_file_like:
local = os.path.abspath(local)
# Run Paramiko-level .get() (side-effects only. womp.)
# TODO: push some of the path handling into Paramiko; it should be
# responsible for dealing with path cleaning etc.
# TODO: probably preserve warning message from v1 when overwriting
# existing files. Use logging for that obviously.
#
# If local appears to be a file-like object, use sftp.getfo, not get
if is_file_like:
self.sftp.getfo(remotepath=remote, fl=local)
else:
self.sftp.get(remotepath=remote, localpath=local)
# Set mode to same as remote end
# TODO: Push this down into SFTPClient sometime (requires backwards
# incompat release.)
if preserve_mode:
remote_mode = self.sftp.stat(remote).st_mode
mode = stat.S_IMODE(remote_mode)
os.chmod(local, mode)
# Return something useful
return Result(
orig_remote=orig_remote,
remote=remote,
orig_local=orig_local,
local=local,
connection=self.connection,
)
|
def load_ssh_config(self):
    """
    Load SSH config file(s) from disk.

    Also (beforehand) ensures that Invoke-level config re: runtime SSH
    config file paths, is accounted for.

    .. versionadded:: 2.0
    """
    # Promote an Invoke-config-supplied path into the runtime slot so
    # that file loading honors it (assumes enough regular config levels
    # have been loaded by now for ssh_config_path to be meaningful).
    runtime_path = self.ssh_config_path
    if runtime_path:
        self._runtime_ssh_path = runtime_path
    # Parse files from disk unless an explicit SSHConfig object was
    # handed to __init__ (in which case it is authoritative).
    if not self._given_explicit_object:
        self._load_ssh_files()
Also (beforehand) ensures that Invoke-level config re: runtime SSH
config file paths, is accounted for.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L110-L127 | null | class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
- it extends the API to account for loading ``ssh_config`` files (which are
stored as additional attributes and have no direct relation to the
regular config data/hierarchy.)
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
.. versionadded:: 2.0
"""
prefix = "fabric"
def __init__(self, *args, **kwargs):
"""
Creates a new Fabric-specific config object.
For most API details, see `invoke.config.Config.__init__`. Parameters
new to this subclass are listed below.
:param ssh_config:
Custom/explicit `paramiko.config.SSHConfig` object. If given,
prevents loading of any SSH config files. Default: ``None``.
:param str runtime_ssh_path:
Runtime SSH config path to load. Prevents loading of system/user
files if given. Default: ``None``.
:param str system_ssh_path:
Location of the system-level SSH config file. Default:
``/etc/ssh/ssh_config``.
:param str user_ssh_path:
Location of the user-level SSH config file. Default:
``~/.ssh/config``.
:param bool lazy:
Has the same meaning as the parent class' ``lazy``, but additionall
controls whether SSH config file loading is deferred (requires
manually calling `load_ssh_config` sometime.) For example, one may
need to wait for user input before calling `set_runtime_ssh_path`,
which will inform exactly what `load_ssh_config` does.
"""
# Tease out our own kwargs.
# TODO: consider moving more stuff out of __init__ and into methods so
# there's less of this sort of splat-args + pop thing? Eh.
ssh_config = kwargs.pop("ssh_config", None)
lazy = kwargs.get("lazy", False)
self.set_runtime_ssh_path(kwargs.pop("runtime_ssh_path", None))
system_path = kwargs.pop("system_ssh_path", "/etc/ssh/ssh_config")
self._set(_system_ssh_path=system_path)
self._set(_user_ssh_path=kwargs.pop("user_ssh_path", "~/.ssh/config"))
# Record whether we were given an explicit object (so other steps know
# whether to bother loading from disk or not)
# This needs doing before super __init__ as that calls our post_init
explicit = ssh_config is not None
self._set(_given_explicit_object=explicit)
# Arrive at some non-None SSHConfig object (upon which to run .parse()
# later, in _load_ssh_file())
if ssh_config is None:
ssh_config = SSHConfig()
self._set(base_ssh_config=ssh_config)
# Now that our own attributes have been prepared & kwargs yanked, we
# can fall up into parent __init__()
super(Config, self).__init__(*args, **kwargs)
# And finally perform convenience non-lazy bits if needed
if not lazy:
self.load_ssh_config()
def set_runtime_ssh_path(self, path):
"""
Configure a runtime-level SSH config file path.
If set, this will cause `load_ssh_config` to skip system and user
files, as OpenSSH does.
.. versionadded:: 2.0
"""
self._set(_runtime_ssh_path=path)
def clone(self, *args, **kwargs):
# TODO: clone() at this point kinda-sorta feels like it's retreading
# __reduce__ and the related (un)pickling stuff...
# Get cloned obj.
# NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
# data is passed in at init time (ensuring no files get loaded a 2nd,
# etc time) and will already be present, so we don't need to set
# .base_ssh_config ourselves. Similarly, there's no need to worry about
# how the SSH config paths may be inaccurate until below; nothing will
# be referencing them.
new = super(Config, self).clone(*args, **kwargs)
# Copy over our custom attributes, so that the clone still resembles us
# re: recording where the data originally came from (in case anything
# re-runs ._load_ssh_files(), for example).
for attr in (
"_runtime_ssh_path",
"_system_ssh_path",
"_user_ssh_path",
):
setattr(new, attr, getattr(self, attr))
# Load SSH configs, in case they weren't prior to now (e.g. a vanilla
# Invoke clone(into), instead of a us-to-us clone.)
self.load_ssh_config()
# All done
return new
def _clone_init_kwargs(self, *args, **kw):
# Parent kwargs
kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
# Transmit our internal SSHConfig via explicit-obj kwarg, thus
# bypassing any file loading. (Our extension of clone() above copies
# over other attributes as well so that the end result looks consistent
# with reality.)
new_config = SSHConfig()
# TODO: as with other spots, this implies SSHConfig needs a cleaner
# public API re: creating and updating its core data.
new_config._config = copy.deepcopy(self.base_ssh_config._config)
return dict(kwargs, ssh_config=new_config)
def _load_ssh_files(self):
"""
Trigger loading of configured SSH config file paths.
Expects that ``base_ssh_config`` has already been set to an
`~paramiko.config.SSHConfig` object.
:returns: ``None``.
"""
# TODO: does this want to more closely ape the behavior of
# InvokeConfig.load_files? re: having a _found attribute for each that
# determines whether to load or skip
if self._runtime_ssh_path is not None:
path = self._runtime_ssh_path
# Manually blow up like open() (_load_ssh_file normally doesn't)
if not os.path.exists(path):
msg = "No such file or directory: {!r}".format(path)
raise IOError(errno.ENOENT, msg)
self._load_ssh_file(os.path.expanduser(path))
elif self.load_ssh_configs:
for path in (self._user_ssh_path, self._system_ssh_path):
self._load_ssh_file(os.path.expanduser(path))
def _load_ssh_file(self, path):
"""
Attempt to open and parse an SSH config file at ``path``.
Does nothing if ``path`` is not a path to a valid file.
:returns: ``None``.
"""
if os.path.isfile(path):
old_rules = len(self.base_ssh_config._config)
with open(path) as fd:
self.base_ssh_config.parse(fd)
new_rules = len(self.base_ssh_config._config)
msg = "Loaded {} new ssh_config rules from {!r}"
debug(msg.format(new_rules - old_rules, path))
else:
debug("File not found, skipping")
@staticmethod
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0
"""
# TODO: hrm should the run-related things actually be derived from the
# runner_class? E.g. Local defines local stuff, Remote defines remote
# stuff? Doesn't help with the final config tree tho...
# TODO: as to that, this is a core problem, Fabric wants split
# local/remote stuff, eg replace_env wants to be False for local and
# True remotely; shell wants to differ depending on target (and either
# way, does not want to use local interrogation for remote)
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
"connect_kwargs": {},
"forward_agent": False,
"gateway": None,
"load_ssh_configs": True,
"port": 22,
"run": {"replace_env": True},
"runners": {"remote": Remote},
"ssh_config_path": None,
"tasks": {"collection_name": "fabfile"},
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
"timeouts": {"connect": None},
"user": get_local_user(),
}
merge_dicts(defaults, ours)
return defaults
|
fabric/fabric | fabric/config.py | Config._load_ssh_files | python | def _load_ssh_files(self):
# TODO: does this want to more closely ape the behavior of
# InvokeConfig.load_files? re: having a _found attribute for each that
# determines whether to load or skip
if self._runtime_ssh_path is not None:
path = self._runtime_ssh_path
# Manually blow up like open() (_load_ssh_file normally doesn't)
if not os.path.exists(path):
msg = "No such file or directory: {!r}".format(path)
raise IOError(errno.ENOENT, msg)
self._load_ssh_file(os.path.expanduser(path))
elif self.load_ssh_configs:
for path in (self._user_ssh_path, self._system_ssh_path):
self._load_ssh_file(os.path.expanduser(path)) | Trigger loading of configured SSH config file paths.
Expects that ``base_ssh_config`` has already been set to an
`~paramiko.config.SSHConfig` object.
:returns: ``None``. | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L168-L189 | null | class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
- it extends the API to account for loading ``ssh_config`` files (which are
stored as additional attributes and have no direct relation to the
regular config data/hierarchy.)
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
.. versionadded:: 2.0
"""
prefix = "fabric"
def __init__(self, *args, **kwargs):
"""
Creates a new Fabric-specific config object.
For most API details, see `invoke.config.Config.__init__`. Parameters
new to this subclass are listed below.
:param ssh_config:
Custom/explicit `paramiko.config.SSHConfig` object. If given,
prevents loading of any SSH config files. Default: ``None``.
:param str runtime_ssh_path:
Runtime SSH config path to load. Prevents loading of system/user
files if given. Default: ``None``.
:param str system_ssh_path:
Location of the system-level SSH config file. Default:
``/etc/ssh/ssh_config``.
:param str user_ssh_path:
Location of the user-level SSH config file. Default:
``~/.ssh/config``.
:param bool lazy:
Has the same meaning as the parent class' ``lazy``, but additionall
controls whether SSH config file loading is deferred (requires
manually calling `load_ssh_config` sometime.) For example, one may
need to wait for user input before calling `set_runtime_ssh_path`,
which will inform exactly what `load_ssh_config` does.
"""
# Tease out our own kwargs.
# TODO: consider moving more stuff out of __init__ and into methods so
# there's less of this sort of splat-args + pop thing? Eh.
ssh_config = kwargs.pop("ssh_config", None)
lazy = kwargs.get("lazy", False)
self.set_runtime_ssh_path(kwargs.pop("runtime_ssh_path", None))
system_path = kwargs.pop("system_ssh_path", "/etc/ssh/ssh_config")
self._set(_system_ssh_path=system_path)
self._set(_user_ssh_path=kwargs.pop("user_ssh_path", "~/.ssh/config"))
# Record whether we were given an explicit object (so other steps know
# whether to bother loading from disk or not)
# This needs doing before super __init__ as that calls our post_init
explicit = ssh_config is not None
self._set(_given_explicit_object=explicit)
# Arrive at some non-None SSHConfig object (upon which to run .parse()
# later, in _load_ssh_file())
if ssh_config is None:
ssh_config = SSHConfig()
self._set(base_ssh_config=ssh_config)
# Now that our own attributes have been prepared & kwargs yanked, we
# can fall up into parent __init__()
super(Config, self).__init__(*args, **kwargs)
# And finally perform convenience non-lazy bits if needed
if not lazy:
self.load_ssh_config()
def set_runtime_ssh_path(self, path):
"""
Configure a runtime-level SSH config file path.
If set, this will cause `load_ssh_config` to skip system and user
files, as OpenSSH does.
.. versionadded:: 2.0
"""
self._set(_runtime_ssh_path=path)
def load_ssh_config(self):
"""
Load SSH config file(s) from disk.
Also (beforehand) ensures that Invoke-level config re: runtime SSH
config file paths, is accounted for.
.. versionadded:: 2.0
"""
# Update the runtime SSH config path (assumes enough regular config
# levels have been loaded that anyone wanting to transmit this info
# from a 'vanilla' Invoke config, has gotten it set.)
if self.ssh_config_path:
self._runtime_ssh_path = self.ssh_config_path
# Load files from disk if we weren't given an explicit SSHConfig in
# __init__
if not self._given_explicit_object:
self._load_ssh_files()
def clone(self, *args, **kwargs):
# TODO: clone() at this point kinda-sorta feels like it's retreading
# __reduce__ and the related (un)pickling stuff...
# Get cloned obj.
# NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
# data is passed in at init time (ensuring no files get loaded a 2nd,
# etc time) and will already be present, so we don't need to set
# .base_ssh_config ourselves. Similarly, there's no need to worry about
# how the SSH config paths may be inaccurate until below; nothing will
# be referencing them.
new = super(Config, self).clone(*args, **kwargs)
# Copy over our custom attributes, so that the clone still resembles us
# re: recording where the data originally came from (in case anything
# re-runs ._load_ssh_files(), for example).
for attr in (
"_runtime_ssh_path",
"_system_ssh_path",
"_user_ssh_path",
):
setattr(new, attr, getattr(self, attr))
# Load SSH configs, in case they weren't prior to now (e.g. a vanilla
# Invoke clone(into), instead of a us-to-us clone.)
self.load_ssh_config()
# All done
return new
def _clone_init_kwargs(self, *args, **kw):
# Parent kwargs
kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
# Transmit our internal SSHConfig via explicit-obj kwarg, thus
# bypassing any file loading. (Our extension of clone() above copies
# over other attributes as well so that the end result looks consistent
# with reality.)
new_config = SSHConfig()
# TODO: as with other spots, this implies SSHConfig needs a cleaner
# public API re: creating and updating its core data.
new_config._config = copy.deepcopy(self.base_ssh_config._config)
return dict(kwargs, ssh_config=new_config)
def _load_ssh_file(self, path):
"""
Attempt to open and parse an SSH config file at ``path``.
Does nothing if ``path`` is not a path to a valid file.
:returns: ``None``.
"""
if os.path.isfile(path):
old_rules = len(self.base_ssh_config._config)
with open(path) as fd:
self.base_ssh_config.parse(fd)
new_rules = len(self.base_ssh_config._config)
msg = "Loaded {} new ssh_config rules from {!r}"
debug(msg.format(new_rules - old_rules, path))
else:
debug("File not found, skipping")
@staticmethod
def global_defaults():
"""
Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0
"""
# TODO: hrm should the run-related things actually be derived from the
# runner_class? E.g. Local defines local stuff, Remote defines remote
# stuff? Doesn't help with the final config tree tho...
# TODO: as to that, this is a core problem, Fabric wants split
# local/remote stuff, eg replace_env wants to be False for local and
# True remotely; shell wants to differ depending on target (and either
# way, does not want to use local interrogation for remote)
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
"connect_kwargs": {},
"forward_agent": False,
"gateway": None,
"load_ssh_configs": True,
"port": 22,
"run": {"replace_env": True},
"runners": {"remote": Remote},
"ssh_config_path": None,
"tasks": {"collection_name": "fabfile"},
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
"timeouts": {"connect": None},
"user": get_local_user(),
}
merge_dicts(defaults, ours)
return defaults
|
fabric/fabric | fabric/config.py | Config._load_ssh_file | python | def _load_ssh_file(self, path):
if os.path.isfile(path):
old_rules = len(self.base_ssh_config._config)
with open(path) as fd:
self.base_ssh_config.parse(fd)
new_rules = len(self.base_ssh_config._config)
msg = "Loaded {} new ssh_config rules from {!r}"
debug(msg.format(new_rules - old_rules, path))
else:
debug("File not found, skipping") | Attempt to open and parse an SSH config file at ``path``.
Does nothing if ``path`` is not a path to a valid file.
:returns: ``None``. | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L191-L207 | null | class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
- it extends the API to account for loading ``ssh_config`` files (which are
stored as additional attributes and have no direct relation to the
regular config data/hierarchy.)
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
.. versionadded:: 2.0
"""
prefix = "fabric"
def __init__(self, *args, **kwargs):
    """
    Creates a new Fabric-specific config object.

    For most API details, see `invoke.config.Config.__init__`. Parameters
    new to this subclass are listed below.

    :param ssh_config:
        Custom/explicit `paramiko.config.SSHConfig` object. If given,
        prevents loading of any SSH config files. Default: ``None``.
    :param str runtime_ssh_path:
        Runtime SSH config path to load. Prevents loading of system/user
        files if given. Default: ``None``.
    :param str system_ssh_path:
        Location of the system-level SSH config file. Default:
        ``/etc/ssh/ssh_config``.
    :param str user_ssh_path:
        Location of the user-level SSH config file. Default:
        ``~/.ssh/config``.
    :param bool lazy:
        Has the same meaning as the parent class' ``lazy``, but additionally
        controls whether SSH config file loading is deferred (requires
        manually calling `load_ssh_config` sometime.) For example, one may
        need to wait for user input before calling `set_runtime_ssh_path`,
        which will inform exactly what `load_ssh_config` does.
    """
    # Tease out our own kwargs.
    # TODO: consider moving more stuff out of __init__ and into methods so
    # there's less of this sort of splat-args + pop thing? Eh.
    ssh_config = kwargs.pop("ssh_config", None)
    # NOTE(review): .get, not .pop -- presumably the parent __init__ also
    # wants to see 'lazy'; confirm against invoke.config.Config.
    lazy = kwargs.get("lazy", False)
    self.set_runtime_ssh_path(kwargs.pop("runtime_ssh_path", None))
    system_path = kwargs.pop("system_ssh_path", "/etc/ssh/ssh_config")
    # _set appears to store attributes directly, bypassing config-data
    # handling -- see invoke's attribute machinery (TODO confirm).
    self._set(_system_ssh_path=system_path)
    self._set(_user_ssh_path=kwargs.pop("user_ssh_path", "~/.ssh/config"))
    # Record whether we were given an explicit object (so other steps know
    # whether to bother loading from disk or not)
    # This needs doing before super __init__ as that calls our post_init
    explicit = ssh_config is not None
    self._set(_given_explicit_object=explicit)
    # Arrive at some non-None SSHConfig object (upon which to run .parse()
    # later, in _load_ssh_file())
    if ssh_config is None:
        ssh_config = SSHConfig()
    self._set(base_ssh_config=ssh_config)
    # Now that our own attributes have been prepared & kwargs yanked, we
    # can fall up into parent __init__()
    super(Config, self).__init__(*args, **kwargs)
    # And finally perform convenience non-lazy bits if needed
    if not lazy:
        self.load_ssh_config()
def set_runtime_ssh_path(self, path):
    """
    Configure a runtime-level SSH config file path.

    If set, this will cause `load_ssh_config` to skip system and user
    files, as OpenSSH does.

    :param str path: Filesystem path to the runtime SSH config file, or
        ``None`` to unset it.

    .. versionadded:: 2.0
    """
    # Stored via _set so it lives as a real attribute, not config data.
    self._set(_runtime_ssh_path=path)
def load_ssh_config(self):
    """
    Load SSH config file(s) from disk.

    Also (beforehand) ensures that Invoke-level config re: runtime SSH
    config file paths, is accounted for.

    No-op on the file-loading front if an explicit ``ssh_config`` object
    was handed to ``__init__``.

    .. versionadded:: 2.0
    """
    # Update the runtime SSH config path (assumes enough regular config
    # levels have been loaded that anyone wanting to transmit this info
    # from a 'vanilla' Invoke config, has gotten it set.)
    if self.ssh_config_path:
        self._runtime_ssh_path = self.ssh_config_path
    # Load files from disk if we weren't given an explicit SSHConfig in
    # __init__
    if not self._given_explicit_object:
        self._load_ssh_files()
def clone(self, *args, **kwargs):
    """
    Create a copy of this configuration object.

    Extends `invoke.config.Config.clone` by also carrying over our
    SSH-config-related attributes and (re)loading SSH config files on the
    newly created clone.

    :returns: The cloned config object.
    """
    # TODO: clone() at this point kinda-sorta feels like it's retreading
    # __reduce__ and the related (un)pickling stuff...
    # Get cloned obj.
    # NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
    # data is passed in at init time (ensuring no files get loaded a 2nd,
    # etc time) and will already be present, so we don't need to set
    # .base_ssh_config ourselves. Similarly, there's no need to worry about
    # how the SSH config paths may be inaccurate until below; nothing will
    # be referencing them.
    new = super(Config, self).clone(*args, **kwargs)
    # Copy over our custom attributes, so that the clone still resembles us
    # re: recording where the data originally came from (in case anything
    # re-runs ._load_ssh_files(), for example).
    for attr in (
        "_runtime_ssh_path",
        "_system_ssh_path",
        "_user_ssh_path",
    ):
        setattr(new, attr, getattr(self, attr))
    # Load SSH configs on the *clone*, in case they weren't prior to now
    # (e.g. a vanilla Invoke clone(into), instead of a us-to-us clone.)
    # BUGFIX: previously called self.load_ssh_config(), which re-loaded the
    # source object's SSH config and left the clone untouched -- defeating
    # the stated purpose of this step.
    new.load_ssh_config()
    # All done
    return new
def _clone_init_kwargs(self, *args, **kw):
    """
    Extend the parent hook so clones receive our SSHConfig via kwargs.

    Returns the parent's kwargs dict augmented with a deep-copied
    ``ssh_config`` object, so the clone's ``__init__`` skips file loading.
    """
    # Parent kwargs
    kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
    # Transmit our internal SSHConfig via explicit-obj kwarg, thus
    # bypassing any file loading. (Our extension of clone() above copies
    # over other attributes as well so that the end result looks consistent
    # with reality.)
    new_config = SSHConfig()
    # TODO: as with other spots, this implies SSHConfig needs a cleaner
    # public API re: creating and updating its core data.
    new_config._config = copy.deepcopy(self.base_ssh_config._config)
    return dict(kwargs, ssh_config=new_config)
def _load_ssh_files(self):
    """
    Trigger loading of configured SSH config file paths.

    Expects that ``base_ssh_config`` has already been set to an
    `~paramiko.config.SSHConfig` object.

    A runtime path, when set, wins outright and must exist; otherwise the
    user- and system-level files are each loaded if present (and if
    ``load_ssh_configs`` is truthy).

    :returns: ``None``.
    """
    # TODO: does this want to more closely ape the behavior of
    # InvokeConfig.load_files? re: having a _found attribute for each that
    # determines whether to load or skip
    if self._runtime_ssh_path is not None:
        path = self._runtime_ssh_path
        # Manually blow up like open() (_load_ssh_file normally doesn't)
        if not os.path.exists(path):
            msg = "No such file or directory: {!r}".format(path)
            raise IOError(errno.ENOENT, msg)
        self._load_ssh_file(os.path.expanduser(path))
    elif self.load_ssh_configs:
        # User file first, then system file, matching OpenSSH precedence.
        for path in (self._user_ssh_path, self._system_ssh_path):
            self._load_ssh_file(os.path.expanduser(path))
@staticmethod
def global_defaults():
    """
    Default configuration values and behavior toggles.

    Fabric only extends this method in order to make minor adjustments and
    additions to Invoke's `~invoke.config.Config.global_defaults`; see its
    documentation for the base values, such as the config subtrees
    controlling behavior of ``run`` or how ``tasks`` behave.

    For Fabric-specific modifications and additions to the Invoke-level
    defaults, see our own config docs at :ref:`default-values`.

    :returns: A nested dict of default settings.

    .. versionadded:: 2.0
    """
    # TODO: hrm should the run-related things actually be derived from the
    # runner_class? E.g. Local defines local stuff, Remote defines remote
    # stuff? Doesn't help with the final config tree tho...
    # TODO: as to that, this is a core problem, Fabric wants split
    # local/remote stuff, eg replace_env wants to be False for local and
    # True remotely; shell wants to differ depending on target (and either
    # way, does not want to use local interrogation for remote)
    # TODO: is it worth moving all of our 'new' settings to a discrete
    # namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
    # It wouldn't actually simplify this code any, but it would make it
    # easier for users to determine what came from which library/repo.
    defaults = InvokeConfig.global_defaults()
    ours = {
        # New settings
        "connect_kwargs": {},
        "forward_agent": False,
        "gateway": None,
        "load_ssh_configs": True,
        "port": 22,
        "run": {"replace_env": True},
        "runners": {"remote": Remote},
        "ssh_config_path": None,
        "tasks": {"collection_name": "fabfile"},
        # TODO: this becomes an override/extend once Invoke grows execution
        # timeouts (which should be timeouts.execute)
        "timeouts": {"connect": None},
        "user": get_local_user(),
    }
    # Merge our additions into (a copy of) Invoke's defaults, in place.
    merge_dicts(defaults, ours)
    return defaults
|
fabric/fabric | fabric/config.py | Config.global_defaults | python | def global_defaults():
# TODO: hrm should the run-related things actually be derived from the
# runner_class? E.g. Local defines local stuff, Remote defines remote
# stuff? Doesn't help with the final config tree tho...
# TODO: as to that, this is a core problem, Fabric wants split
# local/remote stuff, eg replace_env wants to be False for local and
# True remotely; shell wants to differ depending on target (and either
# way, does not want to use local interrogation for remote)
# TODO: is it worth moving all of our 'new' settings to a discrete
# namespace for cleanliness' sake? e.g. ssh.port, ssh.user etc.
# It wouldn't actually simplify this code any, but it would make it
# easier for users to determine what came from which library/repo.
defaults = InvokeConfig.global_defaults()
ours = {
# New settings
"connect_kwargs": {},
"forward_agent": False,
"gateway": None,
"load_ssh_configs": True,
"port": 22,
"run": {"replace_env": True},
"runners": {"remote": Remote},
"ssh_config_path": None,
"tasks": {"collection_name": "fabfile"},
# TODO: this becomes an override/extend once Invoke grows execution
# timeouts (which should be timeouts.execute)
"timeouts": {"connect": None},
"user": get_local_user(),
}
merge_dicts(defaults, ours)
return defaults | Default configuration values and behavior toggles.
Fabric only extends this method in order to make minor adjustments and
additions to Invoke's `~invoke.config.Config.global_defaults`; see its
documentation for the base values, such as the config subtrees
controlling behavior of ``run`` or how ``tasks`` behave.
For Fabric-specific modifications and additions to the Invoke-level
defaults, see our own config docs at :ref:`default-values`.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/config.py#L210-L253 | [
"def get_local_user():\n \"\"\"\n Return the local executing username, or ``None`` if one can't be found.\n\n .. versionadded:: 2.0\n \"\"\"\n # TODO: I don't understand why these lines were added outside the\n # try/except, since presumably it means the attempt at catching ImportError\n # wouldn't work. However, that's how the contributing user committed it.\n # Need an older Windows box to test it out, most likely.\n import getpass\n\n username = None\n # All Unix and most Windows systems support the getpass module.\n try:\n username = getpass.getuser()\n # Some SaaS platforms raise KeyError, implying there is no real user\n # involved. They get the default value of None.\n except KeyError:\n pass\n # Older (?) Windows systems don't support getpass well; they should\n # have the `win32` module instead.\n except ImportError: # pragma: nocover\n if win32:\n import win32api\n import win32security # noqa\n import win32profile # noqa\n\n username = win32api.GetUserName()\n return username\n"
] | class Config(InvokeConfig):
"""
An `invoke.config.Config` subclass with extra Fabric-related behavior.
This class behaves like `invoke.config.Config` in every way, with the
following exceptions:
- its `global_defaults` staticmethod has been extended to add/modify some
default settings (see its documentation, below, for details);
- it triggers loading of Fabric-specific env vars (e.g.
``FABRIC_RUN_HIDE=true`` instead of ``INVOKE_RUN_HIDE=true``) and
filenames (e.g. ``/etc/fabric.yaml`` instead of ``/etc/invoke.yaml``).
- it extends the API to account for loading ``ssh_config`` files (which are
stored as additional attributes and have no direct relation to the
regular config data/hierarchy.)
Intended for use with `.Connection`, as using vanilla
`invoke.config.Config` objects would require users to manually define
``port``, ``user`` and so forth.
.. seealso:: :doc:`/concepts/configuration`, :ref:`ssh-config`
.. versionadded:: 2.0
"""
prefix = "fabric"
def __init__(self, *args, **kwargs):
"""
Creates a new Fabric-specific config object.
For most API details, see `invoke.config.Config.__init__`. Parameters
new to this subclass are listed below.
:param ssh_config:
Custom/explicit `paramiko.config.SSHConfig` object. If given,
prevents loading of any SSH config files. Default: ``None``.
:param str runtime_ssh_path:
Runtime SSH config path to load. Prevents loading of system/user
files if given. Default: ``None``.
:param str system_ssh_path:
Location of the system-level SSH config file. Default:
``/etc/ssh/ssh_config``.
:param str user_ssh_path:
Location of the user-level SSH config file. Default:
``~/.ssh/config``.
:param bool lazy:
Has the same meaning as the parent class' ``lazy``, but additionally
controls whether SSH config file loading is deferred (requires
manually calling `load_ssh_config` sometime.) For example, one may
need to wait for user input before calling `set_runtime_ssh_path`,
which will inform exactly what `load_ssh_config` does.
"""
# Tease out our own kwargs.
# TODO: consider moving more stuff out of __init__ and into methods so
# there's less of this sort of splat-args + pop thing? Eh.
ssh_config = kwargs.pop("ssh_config", None)
lazy = kwargs.get("lazy", False)
self.set_runtime_ssh_path(kwargs.pop("runtime_ssh_path", None))
system_path = kwargs.pop("system_ssh_path", "/etc/ssh/ssh_config")
self._set(_system_ssh_path=system_path)
self._set(_user_ssh_path=kwargs.pop("user_ssh_path", "~/.ssh/config"))
# Record whether we were given an explicit object (so other steps know
# whether to bother loading from disk or not)
# This needs doing before super __init__ as that calls our post_init
explicit = ssh_config is not None
self._set(_given_explicit_object=explicit)
# Arrive at some non-None SSHConfig object (upon which to run .parse()
# later, in _load_ssh_file())
if ssh_config is None:
ssh_config = SSHConfig()
self._set(base_ssh_config=ssh_config)
# Now that our own attributes have been prepared & kwargs yanked, we
# can fall up into parent __init__()
super(Config, self).__init__(*args, **kwargs)
# And finally perform convenience non-lazy bits if needed
if not lazy:
self.load_ssh_config()
def set_runtime_ssh_path(self, path):
"""
Configure a runtime-level SSH config file path.
If set, this will cause `load_ssh_config` to skip system and user
files, as OpenSSH does.
.. versionadded:: 2.0
"""
self._set(_runtime_ssh_path=path)
def load_ssh_config(self):
"""
Load SSH config file(s) from disk.
Also (beforehand) ensures that Invoke-level config re: runtime SSH
config file paths, is accounted for.
.. versionadded:: 2.0
"""
# Update the runtime SSH config path (assumes enough regular config
# levels have been loaded that anyone wanting to transmit this info
# from a 'vanilla' Invoke config, has gotten it set.)
if self.ssh_config_path:
self._runtime_ssh_path = self.ssh_config_path
# Load files from disk if we weren't given an explicit SSHConfig in
# __init__
if not self._given_explicit_object:
self._load_ssh_files()
def clone(self, *args, **kwargs):
    """
    Create a copy of this configuration object.

    Extends `invoke.config.Config.clone` by also carrying over our
    SSH-config-related attributes and (re)loading SSH config files on the
    newly created clone.

    :returns: The cloned config object.
    """
    # TODO: clone() at this point kinda-sorta feels like it's retreading
    # __reduce__ and the related (un)pickling stuff...
    # Get cloned obj.
    # NOTE: Because we also extend .init_kwargs, the actual core SSHConfig
    # data is passed in at init time (ensuring no files get loaded a 2nd,
    # etc time) and will already be present, so we don't need to set
    # .base_ssh_config ourselves. Similarly, there's no need to worry about
    # how the SSH config paths may be inaccurate until below; nothing will
    # be referencing them.
    new = super(Config, self).clone(*args, **kwargs)
    # Copy over our custom attributes, so that the clone still resembles us
    # re: recording where the data originally came from (in case anything
    # re-runs ._load_ssh_files(), for example).
    for attr in (
        "_runtime_ssh_path",
        "_system_ssh_path",
        "_user_ssh_path",
    ):
        setattr(new, attr, getattr(self, attr))
    # Load SSH configs on the *clone*, in case they weren't prior to now
    # (e.g. a vanilla Invoke clone(into), instead of a us-to-us clone.)
    # BUGFIX: previously called self.load_ssh_config(), which re-loaded the
    # source object's SSH config and left the clone untouched -- defeating
    # the stated purpose of this step.
    new.load_ssh_config()
    # All done
    return new
def _clone_init_kwargs(self, *args, **kw):
# Parent kwargs
kwargs = super(Config, self)._clone_init_kwargs(*args, **kw)
# Transmit our internal SSHConfig via explicit-obj kwarg, thus
# bypassing any file loading. (Our extension of clone() above copies
# over other attributes as well so that the end result looks consistent
# with reality.)
new_config = SSHConfig()
# TODO: as with other spots, this implies SSHConfig needs a cleaner
# public API re: creating and updating its core data.
new_config._config = copy.deepcopy(self.base_ssh_config._config)
return dict(kwargs, ssh_config=new_config)
def _load_ssh_files(self):
"""
Trigger loading of configured SSH config file paths.
Expects that ``base_ssh_config`` has already been set to an
`~paramiko.config.SSHConfig` object.
:returns: ``None``.
"""
# TODO: does this want to more closely ape the behavior of
# InvokeConfig.load_files? re: having a _found attribute for each that
# determines whether to load or skip
if self._runtime_ssh_path is not None:
path = self._runtime_ssh_path
# Manually blow up like open() (_load_ssh_file normally doesn't)
if not os.path.exists(path):
msg = "No such file or directory: {!r}".format(path)
raise IOError(errno.ENOENT, msg)
self._load_ssh_file(os.path.expanduser(path))
elif self.load_ssh_configs:
for path in (self._user_ssh_path, self._system_ssh_path):
self._load_ssh_file(os.path.expanduser(path))
def _load_ssh_file(self, path):
    """
    Attempt to open and parse an SSH config file at ``path``.

    Does nothing if ``path`` is not a path to a valid file.

    :returns: ``None``.
    """
    # Guard clause: silently skip nonexistent / non-regular files.
    if not os.path.isfile(path):
        debug("File not found, skipping")
        return
    rules_before = len(self.base_ssh_config._config)
    with open(path) as fd:
        self.base_ssh_config.parse(fd)
    added = len(self.base_ssh_config._config) - rules_before
    debug("Loaded {} new ssh_config rules from {!r}".format(added, path))
@staticmethod
|
fabric/fabric | fabric/tunnels.py | Tunnel.read_and_write | python | def read_and_write(self, reader, writer, chunk_size):
data = reader.recv(chunk_size)
if len(data) == 0:
return True
writer.sendall(data) | Read ``chunk_size`` from ``reader``, writing result to ``writer``.
Returns ``None`` if successful, or ``True`` if the read was empty.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/tunnels.py#L146-L157 | null | class Tunnel(ExceptionHandlingThread):
"""
Bidirectionally forward data between an SSH channel and local socket.
.. versionadded:: 2.0
"""
def __init__(self, channel, sock, finished):
    """
    :param channel:
        One tunnel endpoint; presumably a `paramiko.Channel`-like object
        (it supports select(), recv/sendall and close()) -- TODO confirm.
    :param sock:
        The local socket endpoint of the tunnel.
    :param finished:
        An event-like object whose ``.is_set()`` is polled by `_run` to
        know when to shut down.
    """
    self.channel = channel
    self.sock = sock
    self.finished = finished
    # Per-direction read sizes used by the forwarding loop in _run().
    self.socket_chunk_size = 1024
    self.channel_chunk_size = 1024
    super(Tunnel, self).__init__()
def _run(self):
    """
    Forward data between ``self.sock`` and ``self.channel`` in both
    directions until either side hits EOF or ``self.finished`` is set.

    Both endpoints are always closed on exit, even on error.
    """
    try:
        empty_sock, empty_chan = None, None
        while not self.finished.is_set():
            # 1-second select timeout so the .finished flag gets re-checked
            # regularly even when both endpoints are idle.
            r, w, x = select.select([self.sock, self.channel], [], [], 1)
            if self.sock in r:
                empty_sock = self.read_and_write(
                    self.sock, self.channel, self.socket_chunk_size
                )
            if self.channel in r:
                empty_chan = self.read_and_write(
                    self.channel, self.sock, self.channel_chunk_size
                )
            # read_and_write returns True on an empty read (EOF) -> stop.
            if empty_sock or empty_chan:
                break
    finally:
        self.channel.close()
        self.sock.close()
|
def get_local_user():
    """
    Return the local executing username, or ``None`` if one can't be found.

    .. versionadded:: 2.0
    """
    # Deliberately imported outside the try/except below; a failure to
    # import getpass itself is not handled here.
    import getpass

    try:
        return getpass.getuser()
    except KeyError:
        # Some SaaS platforms raise KeyError, implying there is no real
        # user involved; report "unknown" via None.
        return None
    except ImportError:  # pragma: nocover
        # Older (?) Windows systems don't support getpass well; they should
        # have the `win32` module instead.
        if win32:
            import win32api
            import win32security  # noqa
            import win32profile  # noqa

            return win32api.GetUserName()
        return None
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/util.py#L16-L45 | null | import logging
import sys
# Ape the half-assed logging junk from Invoke, but ensuring the logger reflects
# our name, not theirs. (Assume most contexts will rely on Invoke itself to
# literally enable/disable logging, for now.)
log = logging.getLogger("fabric")
for x in ("debug",):
globals()[x] = getattr(log, x)
win32 = sys.platform == "win32"
|
fabric/fabric | fabric/executor.py | FabExecutor.parameterize | python | def parameterize(self, call, host):
debug("Parameterizing {!r} for host {!r}".format(call, host))
# Generate a custom ConnectionCall that knows how to yield a Connection
# in its make_context(), specifically one to the host requested here.
clone = call.clone(into=ConnectionCall)
# TODO: using bag-of-attrs is mildly gross but whatever, I'll take it.
clone.host = host
return clone | Parameterize a Call with its Context set to a per-host Config. | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/executor.py#L58-L68 | null | class FabExecutor(Executor):
def expand_calls(self, calls, apply_hosts=True):
    """
    Expand a list of task calls into per-host parameterized variants.

    :param calls: Iterable of `Task` / `Call` objects to expand.
    :param bool apply_hosts:
        Whether to parameterize against the core ``--hosts`` flag value;
        pre/post tasks recurse with this off so they run only once each.
    :returns:
        Flat list of calls -- one per (task, host) pair, plus singly-run
        pre/post tasks, plus any command-line remainder as an anonymous
        task per host.
    :raises NothingToDo:
        If a remainder command was given but no hosts were.
    """
    # Generate new call list with per-host variants & Connections inserted
    ret = []
    # TODO: mesh well with Invoke list-type args helper (inv #132)
    hosts = []
    host_str = self.core[0].args.hosts.value
    if apply_hosts and host_str:
        hosts = host_str.split(",")
    for call in calls:
        # Normalize bare Tasks into Calls.
        if isinstance(call, Task):
            call = Call(task=call)
        # TODO: expand this to allow multiple types of execution plans,
        # pending outcome of invoke#461 (which, if flexible enough to
        # handle intersect of dependencies+parameterization, just becomes
        # 'honor that new feature of Invoke')
        # TODO: roles, other non-runtime host parameterizations, etc
        # Pre-tasks get added only once, not once per host.
        ret.extend(self.expand_calls(call.pre, apply_hosts=False))
        # Main task, per host
        for host in hosts:
            ret.append(self.parameterize(call, host))
        # Deal with lack of hosts arg (acts same as `inv` in that case)
        # TODO: no tests for this branch?
        if not hosts:
            ret.append(call)
        # Post-tasks added once, not once per host.
        ret.extend(self.expand_calls(call.post, apply_hosts=False))
    # Add remainder as anonymous task
    if self.core.remainder:
        # TODO: this will need to change once there are more options for
        # setting host lists besides "-H or 100% within-task"
        if not hosts:
            raise NothingToDo(
                "Was told to run a command, but not given any hosts to run it on!"  # noqa
            )

        def anonymous(c):
            # TODO: how to make all our tests configure in_stream=False?
            c.run(self.core.remainder, in_stream=False)

        anon = Call(Task(body=anonymous))
        # TODO: see above TODOs about non-parameterized setups, roles etc
        # TODO: will likely need to refactor that logic some more so it can
        # be used both there and here.
        for host in hosts:
            ret.append(self.parameterize(anon, host))
    return ret
def dedupe(self, tasks):
    """
    Override Invoke's deduplication to be a no-op.

    Fabric's expansion step intentionally generates "duplicate" task
    objects that differ only in host parameterization, all of which must
    actually run.

    :param tasks: The task list, returned unmodified.
    """
    # Don't perform deduping, we will often have "duplicate" tasks w/
    # distinct host values/etc.
    # TODO: might want some deduplication later on though - falls under
    # "how to mesh parameterization with pre/post/etc deduping".
    return tasks
|
fabric/fabric | integration/connection.py | Connection_.mixed_use_of_local_and_run | python | def mixed_use_of_local_and_run(self):
cxn = Connection("localhost")
result = cxn.local("echo foo", hide=True)
assert result.stdout == "foo\n"
assert not cxn.is_connected # meh way of proving it didn't use SSH yet
result = cxn.run("echo foo", hide=True)
assert cxn.is_connected # NOW it's using SSH
assert result.stdout == "foo\n" | Run command truly locally, and over SSH via localhost | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/integration/connection.py#L67-L77 | null | class Connection_:
class ssh_connections:
    """
    Lifecycle tests for the underlying SSH client connection (localhost).
    """

    def open_method_generates_real_connection(self):
        # Opens a real SSH session and verifies its transport is live.
        c = Connection("localhost")
        c.open()
        assert c.client.get_transport().active is True
        assert c.is_connected is True
        return c

    def close_method_closes_connection(self):
        # Handy shortcut - open things up, then return Connection for us to
        # close
        c = self.open_method_generates_real_connection()
        c.close()
        # A closed client drops its transport entirely.
        assert c.client.get_transport() is None
        assert c.is_connected is False
class run:
    """
    Integration tests for `Connection.run` against localhost over SSH.
    """

    def simple_command_on_host(self):
        """
        Run command on localhost
        """
        result = Connection("localhost").run("echo foo", hide=True)
        assert result.stdout == "foo\n"
        assert result.exited == 0
        assert result.ok is True

    def simple_command_with_pty(self):
        """
        Run command under PTY on localhost
        """
        # Most Unix systems should have stty, which asplodes when not run
        # under a pty, and prints useful info otherwise
        result = Connection("localhost").run(
            "stty size", hide=True, pty=True
        )
        found = result.stdout.strip().split()
        cols, rows = pty_size()
        # BUGFIX: was `assert tuple(map(int, found)), rows == cols`, which
        # treated the comparison as a mere assert *message* and only ever
        # checked the tuple's truthiness. `stty size` prints "rows cols",
        # so compare against (rows, cols).
        assert tuple(map(int, found)) == (rows, cols)
        # PTYs use \r\n, not \n, line separation
        assert "\r\n" in result.stdout
        assert result.pty is True
class local:
    """
    Tests for `Connection.local`, which wraps Invoke's purely-local run.
    """

    def wraps_invoke_run(self):
        # NOTE: most of the interesting tests about this are in
        # invoke.runners / invoke.integration.
        cxn = Connection("localhost")
        result = cxn.local("echo foo", hide=True)
        assert result.stdout == "foo\n"
        assert not cxn.is_connected  # meh way of proving it didn't use SSH
class sudo:
    """
    Integration tests for `Connection.sudo` against localhost.
    """

    def setup(self):
        # NOTE: assumes a user configured for passworded (NOT
        # passwordless)_sudo, whose password is 'mypass', is executing the
        # test suite. I.e. our travis-ci setup.
        config = Config(
            {"sudo": {"password": "mypass"}, "run": {"hide": True}}
        )
        self.cxn = Connection("localhost", config=config)

    def sudo_command(self):
        """
        Run command via sudo on host localhost
        """
        skip_outside_travis()
        assert self.cxn.sudo("whoami").stdout.strip() == "root"

    def mixed_sudo_and_normal_commands(self):
        """
        Run command via sudo, and not via sudo, on localhost
        """
        skip_outside_travis()
        logname = os.environ["LOGNAME"]
        assert self.cxn.run("whoami").stdout.strip() == logname
        assert self.cxn.sudo("whoami").stdout.strip() == "root"
def large_remote_commands_finish_cleanly(self):
    # Guards against e.g. cleanup finishing before actually reading all
    # data from the remote end. Which is largely an issue in Invoke-level
    # code but one that only really manifests when doing stuff over the
    # network. Yay computers!
    path = "/usr/share/dict/words"
    cxn = Connection("localhost")
    # Ground truth: read the dictionary file directly off local disk.
    with open(path) as fd:
        words = [x.strip() for x in fd.readlines()]
    # Remote copy: cat the same file over SSH and capture stdout.
    stdout = cxn.run("cat {}".format(path), hide=True).stdout
    lines = [x.strip() for x in stdout.splitlines()]
    # When bug present, # lines received is significantly fewer than the
    # true count in the file (by thousands).
    assert len(lines) == len(words)
|
fabric/fabric | fabric/connection.py | Connection.open | python | def open(self):
# Short-circuit: a live transport means there is nothing to do.
if self.is_connected:
    return
err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
# These may not be given, period -- Connection itself owns host/port/user.
for key in """
    hostname
    port
    username
    """.split():
    if key in self.connect_kwargs:
        raise ValueError(err.format(key))
# These may be given one way or the other, but not both
if (
    "timeout" in self.connect_kwargs
    and self.connect_timeout is not None
):
    raise ValueError(err.format("timeout"))
# No conflicts -> merge 'em together. user/host/port always come from
# the Connection's own attributes, layered over connect_kwargs.
kwargs = dict(
    self.connect_kwargs,
    username=self.user,
    hostname=self.host,
    port=self.port,
)
if self.gateway:
    # open_gateway() yields a socket-like object for either a
    # Connection-style (ProxyJump) or string-style (ProxyCommand) gateway.
    kwargs["sock"] = self.open_gateway()
if self.connect_timeout:
    kwargs["timeout"] = self.connect_timeout
# Strip out empty defaults for less noisy debugging
if "key_filename" in kwargs and not kwargs["key_filename"]:
    del kwargs["key_filename"]
# Actually connect!
self.client.connect(**kwargs)
self.transport = self.client.get_transport() | Initiate an SSH connection to the host/port this object is bound to.
This may include activating the configured gateway connection, if one
is set.
Also saves a handle to the now-set Transport object for easier access.
Various connect-time settings (and/or their corresponding :ref:`SSH
config options <ssh-config>`) are utilized here in the call to
`SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
see :doc:`the configuration docs </concepts/configuration>`.)
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/connection.py#L475-L525 | null | class Connection(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
* `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
* Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
* Connections do not always need to be explicitly closed; much of the
time, Paramiko's garbage collection hooks or Python's own shutdown
sequence will take care of things. **However**, should you encounter edge
cases (for example, sessions hanging on exit) it's helpful to explicitly
close connections when you're done with them.
This can be accomplished by manually calling `close`, or by using the
object as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None
original_host = None
user = None
port = None
ssh_config = None
gateway = None
forward_agent = None
connect_timeout = None
connect_kwargs = None
client = None
transport = None
_sftp = None
_agent_handler = None
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
self,
host,
user=None,
port=None,
config=None,
gateway=None,
forward_agent=None,
connect_timeout=None,
connect_kwargs=None,
):
"""
Set up a new object representing a server connection.
:param str host:
the hostname (or IP address) of this connection.
May include shorthand for the ``user`` and/or ``port`` parameters,
of the form ``user@host``, ``host:port``, or ``user@host:port``.
.. note::
Due to ambiguity, IPv6 host addresses are incompatible with the
``host:port`` shorthand (though ``user@host`` will still work
OK). In other words, the presence of >1 ``:`` character will
prevent any attempt to derive a shorthand port number; use the
explicit ``port`` parameter instead.
.. note::
If ``host`` matches a ``Host`` clause in loaded SSH config
data, and that ``Host`` clause contains a ``Hostname``
directive, the resulting `.Connection` object will behave as if
``host`` is equal to that ``Hostname`` value.
In all cases, the original value of ``host`` is preserved as
the ``original_host`` attribute.
Thus, given SSH config like so::
Host myalias
Hostname realhostname
a call like ``Connection(host='myalias')`` will result in an
object whose ``host`` attribute is ``realhostname``, and whose
``original_host`` attribute is ``myalias``.
:param str user:
the login user for the remote connection. Defaults to
``config.user``.
:param int port:
the remote port. Defaults to ``config.port``.
:param config:
configuration settings to use when executing methods on this
`.Connection` (e.g. default SSH port and so forth).
Should be a `.Config` or an `invoke.config.Config`
(which will be turned into a `.Config`).
Default is an anonymous `.Config` object.
:param gateway:
An object to use as a proxy or gateway for this connection.
This parameter accepts one of the following:
- another `.Connection` (for a ``ProxyJump`` style gateway);
- a shell command string (for a ``ProxyCommand`` style style
gateway).
Default: ``None``, meaning no gatewaying will occur (unless
otherwise configured; if one wants to override a configured gateway
at runtime, specify ``gateway=False``.)
.. seealso:: :ref:`ssh-gateways`
:param bool forward_agent:
Whether to enable SSH agent forwarding.
Default: ``config.forward_agent``.
:param int connect_timeout:
Connection timeout, in seconds.
Default: ``config.timeouts.connect``.
.. _connect_kwargs-arg:
:param dict connect_kwargs:
Keyword arguments handed verbatim to
`SSHClient.connect <paramiko.client.SSHClient.connect>` (when
`.open` is called).
`.Connection` tries not to grow additional settings/kwargs of its
own unless it is adding value of some kind; thus,
``connect_kwargs`` is currently the right place to hand in paramiko
connection parameters such as ``pkey`` or ``key_filename``. For
example::
c = Connection(
host="hostname",
user="admin",
connect_kwargs={
"key_filename": "/home/myuser/.ssh/private.key",
},
)
Default: ``config.connect_kwargs``.
:raises ValueError:
if user or port values are given via both ``host`` shorthand *and*
their own arguments. (We `refuse the temptation to guess`_).
.. _refuse the temptation to guess:
http://zen-of-python.info/
in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
"""
# NOTE: parent __init__ sets self._config; for now we simply overwrite
# that below. If it's somehow problematic we would want to break parent
# __init__ up in a manner that is more cleanly overrideable.
super(Connection, self).__init__(config=config)
#: The .Config object referenced when handling default values (for e.g.
#: user or port, when not explicitly given) or deciding how to behave.
if config is None:
config = Config()
# Handle 'vanilla' Invoke config objects, which need cloning 'into' one
# of our own Configs (which grants the new defaults, etc, while not
# squashing them if the Invoke-level config already accounted for them)
elif not isinstance(config, Config):
config = config.clone(into=Config)
self._set(_config=config)
# TODO: when/how to run load_files, merge, load_shell_env, etc?
# TODO: i.e. what is the lib use case here (and honestly in invoke too)
shorthand = self.derive_shorthand(host)
host = shorthand["host"]
err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa
if shorthand["user"] is not None:
if user is not None:
raise ValueError(err.format("user"))
user = shorthand["user"]
if shorthand["port"] is not None:
if port is not None:
raise ValueError(err.format("port"))
port = shorthand["port"]
# NOTE: we load SSH config data as early as possible as it has
# potential to affect nearly every other attribute.
#: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
self.ssh_config = self.config.base_ssh_config.lookup(host)
self.original_host = host
#: The hostname of the target server.
self.host = host
if "hostname" in self.ssh_config:
# TODO: log that this occurred?
self.host = self.ssh_config["hostname"]
#: The username this connection will use to connect to the remote end.
self.user = user or self.ssh_config.get("user", self.config.user)
# TODO: is it _ever_ possible to give an empty user value (e.g.
# user='')? E.g. do some SSH server specs allow for that?
#: The network port to connect on.
self.port = port or int(self.ssh_config.get("port", self.config.port))
# Gateway/proxy/bastion/jump setting: non-None values - string,
# Connection, even eg False - get set directly; None triggers seek in
# config/ssh_config
#: The gateway `.Connection` or ``ProxyCommand`` string to be used,
#: if any.
self.gateway = gateway if gateway is not None else self.get_gateway()
# NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
# the ProxyCommand subprocess at init time, vs open() time.
# TODO: make paramiko.proxy.ProxyCommand lazy instead?
if forward_agent is None:
# Default to config...
forward_agent = self.config.forward_agent
# But if ssh_config is present, it wins
if "forwardagent" in self.ssh_config:
# TODO: SSHConfig really, seriously needs some love here, god
map_ = {"yes": True, "no": False}
forward_agent = map_[self.ssh_config["forwardagent"]]
#: Whether agent forwarding is enabled.
self.forward_agent = forward_agent
if connect_timeout is None:
connect_timeout = self.ssh_config.get(
"connecttimeout", self.config.timeouts.connect
)
if connect_timeout is not None:
connect_timeout = int(connect_timeout)
#: Connection timeout
self.connect_timeout = connect_timeout
#: Keyword arguments given to `paramiko.client.SSHClient.connect` when
#: `open` is called.
self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
#: The `paramiko.client.SSHClient` instance this connection wraps.
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
self.client = client
#: A convenience handle onto the return value of
#: ``self.client.get_transport()``.
self.transport = None
def resolve_connect_kwargs(self, connect_kwargs):
    """
    Merge init-time, config-level, and SSH-config connect arguments.

    :param dict connect_kwargs:
        kwargs given at init time, or ``None`` to fall back entirely to
        ``self.config.connect_kwargs``.
    :returns:
        The dict later handed to ``SSHClient.connect`` inside `open`.
        ``key_filename`` values are concatenated in the order: config,
        init kwarg, SSH-config ``IdentityFile``.
    """
    # Grab connect_kwargs from config if not explicitly given.
    if connect_kwargs is None:
        # TODO: is it better to pre-empt conflicts w/ manually-handled
        # connect() kwargs (hostname, username, etc) here or in open()?
        # We're doing open() for now in case e.g. someone manually modifies
        # .connect_kwargs attributewise, but otherwise it feels better to
        # do it early instead of late.
        connect_kwargs = self.config.connect_kwargs
    # Special case: key_filename gets merged instead of overridden.
    # TODO: probably want some sorta smart merging generally, special cases
    # are bad.
    elif "key_filename" in self.config.connect_kwargs:
        kwarg_val = connect_kwargs.get("key_filename", [])
        conf_val = self.config.connect_kwargs["key_filename"]
        # Config value comes before kwarg value (because it may contain
        # CLI flag value.)
        connect_kwargs["key_filename"] = conf_val + kwarg_val
    # SSH config identityfile values come last in the key_filename
    # 'hierarchy'.
    if "identityfile" in self.ssh_config:
        connect_kwargs.setdefault("key_filename", [])
        connect_kwargs["key_filename"].extend(
            self.ssh_config["identityfile"]
        )
    return connect_kwargs
def get_gateway(self):
    """
    Determine the gateway for this connection, SSH config taking priority.

    :returns:
        A chained `.Connection` (from a ``ProxyJump`` directive), a
        proxy-command string (from ``ProxyCommand``), or the Invoke-level
        config value -- which may be ``None``.
    """
    # SSH config wins over Invoke-style config
    if "proxyjump" in self.ssh_config:
        # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
        # with the final (itself non-gatewayed) hop and work up to
        # the front (actual, supplied as our own gateway) hop
        hops = reversed(self.ssh_config["proxyjump"].split(","))
        prev_gw = None
        for hop in hops:
            # Short-circuit if we appear to be our own proxy, which would
            # be a RecursionError. Implies SSH config wildcards.
            # TODO: in an ideal world we'd check user/port too in case they
            # differ, but...seriously? They can file a PR with those extra
            # half dozen test cases in play, E_NOTIME
            if self.derive_shorthand(hop)["host"] == self.host:
                return None
            # Happily, ProxyJump uses identical format to our host
            # shorthand...
            kwargs = dict(config=self.config.clone())
            if prev_gw is not None:
                kwargs["gateway"] = prev_gw
            cxn = Connection(hop, **kwargs)
            prev_gw = cxn
        return prev_gw
    elif "proxycommand" in self.ssh_config:
        # Just a string, which we interpret as a proxy command..
        return self.ssh_config["proxycommand"]
    # Fallback: config value (may be None).
    return self.config.gateway
def __repr__(self):
# Host comes first as it's the most common differentiator by far
bits = [("host", self.host)]
# TODO: maybe always show user regardless? Explicit is good...
if self.user != self.config.user:
bits.append(("user", self.user))
# TODO: harder to make case for 'always show port'; maybe if it's
# non-22 (even if config has overridden the local default)?
if self.port != self.config.port:
bits.append(("port", self.port))
# NOTE: sometimes self.gateway may be eg False if someone wants to
# explicitly override a configured non-None value (as otherwise it's
# impossible for __init__ to tell if a None means "nothing given" or
# "seriously please no gatewaying". So, this must always be a vanilla
# truth test and not eg "is not None".
if self.gateway:
# Displaying type because gw params would probs be too verbose
val = "proxyjump"
if isinstance(self.gateway, string_types):
val = "proxycommand"
bits.append(("gw", val))
return "<Connection {}>".format(
" ".join("{}={}".format(*x) for x in bits)
)
def _identity(self):
# TODO: consider including gateway and maybe even other init kwargs?
# Whether two cxns w/ same user/host/port but different
# gateway/keys/etc, should be considered "the same", is unclear.
return (self.host, self.user, self.port)
def __eq__(self, other):
if not isinstance(other, Connection):
return False
return self._identity() == other._identity()
def __lt__(self, other):
return self._identity() < other._identity()
def __hash__(self):
# NOTE: this departs from Context/DataProxy, which is not usefully
# hashable.
return hash(self._identity())
def derive_shorthand(self, host_string):
    """
    Parse a ``[user@]host[:port]`` string into its component parts.

    :param str host_string:
        shorthand such as ``"admin@web1:2222"``; any component may be
        omitted.
    :returns:
        dict with keys ``user``, ``host`` and ``port``; missing or empty
        components map to ``None`` (``port`` is an int when present).
    """
    # Peel off an optional leading "user@"; rpartition splits on the
    # *last* '@' so odd usernames containing '@' stay intact.
    before_at, at_sep, hostport = host_string.rpartition("@")
    user = before_at if at_sep and before_at else None
    # IPv6: can't reliably tell where addr ends and port begins, so don't
    # try (and don't bother adding special syntax either, user should avoid
    # this situation by using port=).
    if hostport.count(":") > 1:
        host = hostport
        port = None
    else:
        # IPv4/hostname: a single ':' reliably separates host from port.
        head, colon, tail = hostport.rpartition(":")
        if colon:
            host = head or None
            port = int(tail) if tail else None
        else:
            host = tail or None
            port = None
    return {"user": user, "host": host, "port": port}
@property
def is_connected(self):
    """
    Whether or not this connection is actually open.

    Reflects the underlying transport's ``active`` flag; a Connection
    that was never opened has no transport and thus reports ``False``.

    .. versionadded:: 2.0
    """
    return self.transport.active if self.transport else False
def open_gateway(self):
"""
Obtain a socket-like object from `gateway`.
:returns:
A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
`.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
was a string.
.. versionadded:: 2.0
"""
# ProxyCommand is faster to set up, so do it first.
if isinstance(self.gateway, string_types):
# Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
# TODO: use real SSH config once loading one properly is
# implemented.
ssh_conf = SSHConfig()
dummy = "Host {}\n ProxyCommand {}"
ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
# Handle inner-Connection gateway type here.
# TODO: logging
self.gateway.open()
# TODO: expose the opened channel itself as an attribute? (another
# possible argument for separating the two gateway types...) e.g. if
# someone wanted to piggyback on it for other same-interpreter socket
# needs...
# TODO: and the inverse? allow users to supply their own socket/like
# object they got via $WHEREEVER?
# TODO: how best to expose timeout param? reuse general connection
# timeout from config?
return self.gateway.transport.open_channel(
kind="direct-tcpip",
dest_addr=(self.host, int(self.port)),
# NOTE: src_addr needs to be 'empty but not None' values to
# correctly encode into a network message. Theoretically Paramiko
# could auto-interpret None sometime & save us the trouble.
src_addr=("", 0),
)
def close(self):
    """
    Terminate the network connection to the remote end, if open.

    If no connection is open, this method does nothing.

    .. versionadded:: 2.0
    """
    if self.is_connected:
        self.client.close()
        # Agent forwarding spins up its own request handler; close it
        # along with the client so its resources aren't leaked.
        # NOTE(review): nesting of this cleanup inside is_connected is
        # inferred -- indentation was lost in this copy; confirm upstream.
        if self.forward_agent and self._agent_handler is not None:
            self._agent_handler.close()
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
@opens
def create_session(self):
channel = self.transport.open_session()
if self.forward_agent:
self._agent_handler = AgentRequestHandler(channel)
return channel
@opens
def run(self, command, **kwargs):
"""
Execute a shell command on the remote end of this connection.
This method wraps an SSH-capable implementation of
`invoke.runners.Runner.run`; see its documentation for details.
.. warning::
There are a few spots where Fabric departs from Invoke's default
settings/behaviors; they are documented under
`.Config.global_defaults`.
.. versionadded:: 2.0
"""
runner = self.config.runners.remote(self)
return self._run(runner, command, **kwargs)
@opens
def sudo(self, command, **kwargs):
"""
Execute a shell command, via ``sudo``, on the remote end.
This method is identical to `invoke.context.Context.sudo` in every way,
except in that -- like `run` -- it honors per-host/per-connection
configuration overrides in addition to the generic/global ones. Thus,
for example, per-host sudo passwords may be configured.
.. versionadded:: 2.0
"""
runner = self.config.runners.remote(self)
return self._sudo(runner, command, **kwargs)
def local(self, *args, **kwargs):
"""
Execute a shell command on the local system.
This method is effectively a wrapper of `invoke.run`; see its docs for
details and call signature.
.. versionadded:: 2.0
"""
# Superclass run() uses runners.local, so we can literally just call it
# straight.
return super(Connection, self).run(*args, **kwargs)
@opens
def sftp(self):
"""
Return a `~paramiko.sftp_client.SFTPClient` object.
If called more than one time, memoizes the first result; thus, any
given `.Connection` instance will only ever have a single SFTP client,
and state (such as that managed by
`~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.
.. versionadded:: 2.0
"""
if self._sftp is None:
self._sftp = self.client.open_sftp()
return self._sftp
def get(self, *args, **kwargs):
"""
Get a remote file to the local filesystem or file-like object.
Simply a wrapper for `.Transfer.get`. Please see its documentation for
all details.
.. versionadded:: 2.0
"""
return Transfer(self).get(*args, **kwargs)
def put(self, *args, **kwargs):
"""
Put a remote file (or file-like object) to the remote filesystem.
Simply a wrapper for `.Transfer.put`. Please see its documentation for
all details.
.. versionadded:: 2.0
"""
return Transfer(self).put(*args, **kwargs)
# TODO: yield the socket for advanced users? Other advanced use cases
# (perhaps factor out socket creation itself)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_local(
self,
local_port,
remote_port=None,
remote_host="localhost",
local_host="localhost",
):
"""
Open a tunnel connecting ``local_port`` to the server's environment.
For example, say you want to connect to a remote PostgreSQL database
which is locked down and only accessible via the system it's running
on. You have SSH access to this server, so you can temporarily make
port 5432 on your local system act like port 5432 on the server::
import psycopg2
from fabric import Connection
with Connection('my-db-server').forward_local(5432):
db = psycopg2.connect(
host='localhost', port=5432, database='mydb'
)
# Do things with 'db' here
This method is analogous to using the ``-L`` option of OpenSSH's
``ssh`` program.
:param int local_port: The local port number on which to listen.
:param int remote_port:
The remote port number. Defaults to the same value as
``local_port``.
:param str local_host:
The local hostname/interface on which to listen. Default:
``localhost``.
:param str remote_host:
The remote hostname serving the forwarded remote port. Default:
``localhost`` (i.e., the host this `.Connection` is connected to.)
:returns:
Nothing; this method is only useful as a context manager affecting
local operating system state.
.. versionadded:: 2.0
"""
if not remote_port:
remote_port = local_port
# TunnelManager does all of the work, sitting in the background (so we
# can yield) and spawning threads every time somebody connects to our
# local port.
finished = Event()
manager = TunnelManager(
local_port=local_port,
local_host=local_host,
remote_port=remote_port,
remote_host=remote_host,
# TODO: not a huge fan of handing in our transport, but...?
transport=self.transport,
finished=finished,
)
manager.start()
# Return control to caller now that things ought to be operational
try:
yield
# Teardown once user exits block
finally:
# Signal to manager that it should close all open tunnels
finished.set()
# Then wait for it to do so
manager.join()
# Raise threading errors from within the manager, which would be
# one of:
# - an inner ThreadException, which was created by the manager on
# behalf of its Tunnels; this gets directly raised.
# - some other exception, which would thus have occurred in the
# manager itself; we wrap this in a new ThreadException.
# NOTE: in these cases, some of the metadata tracking in
# ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
# is useful when dealing with multiple nearly-identical sibling IO
# threads) is superfluous, but it doesn't feel worth breaking
# things up further; we just ignore it for now.
wrapper = manager.exception()
if wrapper is not None:
if wrapper.type is ThreadException:
raise wrapper.value
else:
raise ThreadException([wrapper])
# TODO: cancel port forward on transport? Does that even make sense
# here (where we used direct-tcpip) vs the opposite method (which
# is what uses forward-tcpip)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_remote(
self,
remote_port,
local_port=None,
remote_host="127.0.0.1",
local_host="localhost",
):
"""
Open a tunnel connecting ``remote_port`` to the local environment.
For example, say you're running a daemon in development mode on your
workstation at port 8080, and want to funnel traffic to it from a
production or staging environment.
In most situations this isn't possible as your office/home network
probably blocks inbound traffic. But you have SSH access to this
server, so you can temporarily make port 8080 on that server act like
port 8080 on your workstation::
from fabric import Connection
c = Connection('my-remote-server')
with c.forward_remote(8080):
c.run("remote-data-writer --port 8080")
# Assuming remote-data-writer runs until interrupted, this will
# stay open until you Ctrl-C...
This method is analogous to using the ``-R`` option of OpenSSH's
``ssh`` program.
:param int remote_port: The remote port number on which to listen.
:param int local_port:
The local port number. Defaults to the same value as
``remote_port``.
:param str local_host:
The local hostname/interface the forwarded connection talks to.
Default: ``localhost``.
:param str remote_host:
The remote interface address to listen on when forwarding
connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
localhost).
:returns:
Nothing; this method is only useful as a context manager affecting
local operating system state.
.. versionadded:: 2.0
"""
if not local_port:
local_port = remote_port
# Callback executes on each connection to the remote port and is given
# a Channel hooked up to said port. (We don't actually care about the
# source/dest host/port pairs at all; only whether the channel has data
# to read and suchlike.)
# We then pair that channel with a new 'outbound' socket connection to
# the local host/port being forwarded, in a new Tunnel.
# That Tunnel is then added to a shared data structure so we can track
# & close them during shutdown.
#
# TODO: this approach is less than ideal because we have to share state
# between ourselves & the callback handed into the transport's own
# thread handling (which is roughly analogous to our self-controlled
# TunnelManager for local forwarding). See if we can use more of
# Paramiko's API (or improve it and then do so) so that isn't
# necessary.
tunnels = []
def callback(channel, src_addr_tup, dst_addr_tup):
sock = socket.socket()
# TODO: handle connection failure such that channel, etc get closed
sock.connect((local_host, local_port))
# TODO: we don't actually need to generate the Events at our level,
# do we? Just let Tunnel.__init__ do it; all we do is "press its
# button" on shutdown...
tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
tunnel.start()
# Communication between ourselves & the Paramiko handling subthread
tunnels.append(tunnel)
# Ask Paramiko (really, the remote sshd) to call our callback whenever
# connections are established on the remote iface/port.
# transport.request_port_forward(remote_host, remote_port, callback)
try:
self.transport.request_port_forward(
address=remote_host, port=remote_port, handler=callback
)
yield
finally:
# TODO: see above re: lack of a TunnelManager
# TODO: and/or also refactor with TunnelManager re: shutdown logic.
# E.g. maybe have a non-thread TunnelManager-alike with a method
# that acts as the callback? At least then there's a tiny bit more
# encapsulation...meh.
for tunnel in tunnels:
tunnel.finished.set()
tunnel.join()
self.transport.cancel_port_forward(
address=remote_host, port=remote_port
)
|
fabric/fabric | fabric/connection.py | Connection.open_gateway | python | def open_gateway(self):
# ProxyCommand is faster to set up, so do it first.
if isinstance(self.gateway, string_types):
# Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
# TODO: use real SSH config once loading one properly is
# implemented.
ssh_conf = SSHConfig()
dummy = "Host {}\n ProxyCommand {}"
ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
# Handle inner-Connection gateway type here.
# TODO: logging
self.gateway.open()
# TODO: expose the opened channel itself as an attribute? (another
# possible argument for separating the two gateway types...) e.g. if
# someone wanted to piggyback on it for other same-interpreter socket
# needs...
# TODO: and the inverse? allow users to supply their own socket/like
# object they got via $WHEREEVER?
# TODO: how best to expose timeout param? reuse general connection
# timeout from config?
return self.gateway.transport.open_channel(
kind="direct-tcpip",
dest_addr=(self.host, int(self.port)),
# NOTE: src_addr needs to be 'empty but not None' values to
# correctly encode into a network message. Theoretically Paramiko
# could auto-interpret None sometime & save us the trouble.
src_addr=("", 0),
) | Obtain a socket-like object from `gateway`.
:returns:
A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
`.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
was a string.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/connection.py#L527-L565 | null | class Connection(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
* `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
* Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
* Connections do not always need to be explicitly closed; much of the
time, Paramiko's garbage collection hooks or Python's own shutdown
sequence will take care of things. **However**, should you encounter edge
cases (for example, sessions hanging on exit) it's helpful to explicitly
close connections when you're done with them.
This can be accomplished by manually calling `close`, or by using the
object as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None
original_host = None
user = None
port = None
ssh_config = None
gateway = None
forward_agent = None
connect_timeout = None
connect_kwargs = None
client = None
transport = None
_sftp = None
_agent_handler = None
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
    self,
    host,
    user=None,
    port=None,
    config=None,
    gateway=None,
    forward_agent=None,
    connect_timeout=None,
    connect_kwargs=None,
):
    """
    Set up a new object representing a server connection.

    :param str host:
        the hostname (or IP address) of this connection.
        May include shorthand for the ``user`` and/or ``port`` parameters,
        of the form ``user@host``, ``host:port``, or ``user@host:port``.

        .. note::
            Due to ambiguity, IPv6 host addresses are incompatible with the
            ``host:port`` shorthand (though ``user@host`` will still work
            OK). In other words, the presence of >1 ``:`` character will
            prevent any attempt to derive a shorthand port number; use the
            explicit ``port`` parameter instead.

        .. note::
            If ``host`` matches a ``Host`` clause in loaded SSH config
            data, and that ``Host`` clause contains a ``Hostname``
            directive, the resulting `.Connection` object will behave as if
            ``host`` is equal to that ``Hostname`` value.

            In all cases, the original value of ``host`` is preserved as
            the ``original_host`` attribute.

            Thus, given SSH config like so::

                Host myalias
                    Hostname realhostname

            a call like ``Connection(host='myalias')`` will result in an
            object whose ``host`` attribute is ``realhostname``, and whose
            ``original_host`` attribute is ``myalias``.

    :param str user:
        the login user for the remote connection. Defaults to
        ``config.user``.

    :param int port:
        the remote port. Defaults to ``config.port``.

    :param config:
        configuration settings to use when executing methods on this
        `.Connection` (e.g. default SSH port and so forth).
        Should be a `.Config` or an `invoke.config.Config`
        (which will be turned into a `.Config`).
        Default is an anonymous `.Config` object.

    :param gateway:
        An object to use as a proxy or gateway for this connection.
        This parameter accepts one of the following:

        - another `.Connection` (for a ``ProxyJump`` style gateway);
        - a shell command string (for a ``ProxyCommand`` style style
          gateway).

        Default: ``None``, meaning no gatewaying will occur (unless
        otherwise configured; if one wants to override a configured gateway
        at runtime, specify ``gateway=False``.)

        .. seealso:: :ref:`ssh-gateways`

    :param bool forward_agent:
        Whether to enable SSH agent forwarding.
        Default: ``config.forward_agent``.

    :param int connect_timeout:
        Connection timeout, in seconds.
        Default: ``config.timeouts.connect``.

    .. _connect_kwargs-arg:

    :param dict connect_kwargs:
        Keyword arguments handed verbatim to
        `SSHClient.connect <paramiko.client.SSHClient.connect>` (when
        `.open` is called).
        `.Connection` tries not to grow additional settings/kwargs of its
        own unless it is adding value of some kind; thus,
        ``connect_kwargs`` is currently the right place to hand in paramiko
        connection parameters such as ``pkey`` or ``key_filename``. For
        example::

            c = Connection(
                host="hostname",
                user="admin",
                connect_kwargs={
                    "key_filename": "/home/myuser/.ssh/private.key",
                },
            )

        Default: ``config.connect_kwargs``.

    :raises ValueError:
        if user or port values are given via both ``host`` shorthand *and*
        their own arguments. (We `refuse the temptation to guess`_).

    .. _refuse the temptation to guess:
        http://zen-of-python.info/
        in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
    """
    # NOTE: parent __init__ sets self._config; for now we simply overwrite
    # that below. If it's somehow problematic we would want to break parent
    # __init__ up in a manner that is more cleanly overrideable.
    super(Connection, self).__init__(config=config)
    #: The .Config object referenced when handling default values (for e.g.
    #: user or port, when not explicitly given) or deciding how to behave.
    if config is None:
        config = Config()
    # Handle 'vanilla' Invoke config objects, which need cloning 'into' one
    # of our own Configs (which grants the new defaults, etc, while not
    # squashing them if the Invoke-level config already accounted for them)
    elif not isinstance(config, Config):
        config = config.clone(into=Config)
    self._set(_config=config)
    # TODO: when/how to run load_files, merge, load_shell_env, etc?
    # TODO: i.e. what is the lib use case here (and honestly in invoke too)
    # Split any user@host:port shorthand out of the host string; explicit
    # kwargs may not conflict with shorthand-derived values.
    shorthand = self.derive_shorthand(host)
    host = shorthand["host"]
    err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa
    if shorthand["user"] is not None:
        if user is not None:
            raise ValueError(err.format("user"))
        user = shorthand["user"]
    if shorthand["port"] is not None:
        if port is not None:
            raise ValueError(err.format("port"))
        port = shorthand["port"]
    # NOTE: we load SSH config data as early as possible as it has
    # potential to affect nearly every other attribute.
    #: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
    self.ssh_config = self.config.base_ssh_config.lookup(host)
    self.original_host = host
    #: The hostname of the target server.
    self.host = host
    if "hostname" in self.ssh_config:
        # TODO: log that this occurred?
        self.host = self.ssh_config["hostname"]
    #: The username this connection will use to connect to the remote end.
    self.user = user or self.ssh_config.get("user", self.config.user)
    # TODO: is it _ever_ possible to give an empty user value (e.g.
    # user='')? E.g. do some SSH server specs allow for that?
    #: The network port to connect on.
    self.port = port or int(self.ssh_config.get("port", self.config.port))
    # Gateway/proxy/bastion/jump setting: non-None values - string,
    # Connection, even eg False - get set directly; None triggers seek in
    # config/ssh_config
    #: The gateway `.Connection` or ``ProxyCommand`` string to be used,
    #: if any.
    self.gateway = gateway if gateway is not None else self.get_gateway()
    # NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
    # the ProxyCommand subprocess at init time, vs open() time.
    # TODO: make paramiko.proxy.ProxyCommand lazy instead?
    if forward_agent is None:
        # Default to config...
        forward_agent = self.config.forward_agent
        # But if ssh_config is present, it wins
        if "forwardagent" in self.ssh_config:
            # TODO: SSHConfig really, seriously needs some love here, god
            map_ = {"yes": True, "no": False}
            forward_agent = map_[self.ssh_config["forwardagent"]]
    #: Whether agent forwarding is enabled.
    self.forward_agent = forward_agent
    if connect_timeout is None:
        connect_timeout = self.ssh_config.get(
            "connecttimeout", self.config.timeouts.connect
        )
    if connect_timeout is not None:
        connect_timeout = int(connect_timeout)
    #: Connection timeout
    self.connect_timeout = connect_timeout
    #: Keyword arguments given to `paramiko.client.SSHClient.connect` when
    #: `open` is called.
    self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
    #: The `paramiko.client.SSHClient` instance this connection wraps.
    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy())
    self.client = client
    #: A convenience handle onto the return value of
    #: ``self.client.get_transport()``.
    self.transport = None
def resolve_connect_kwargs(self, connect_kwargs):
    """
    Merge init-time ``connect_kwargs`` with config and SSH config data.

    :param connect_kwargs:
        The dict given to ``__init__`` (possibly ``None``, meaning "use
        ``config.connect_kwargs``").
    :returns:
        The dict to be stored as ``self.connect_kwargs``.
    """
    # Grab connect_kwargs from config if not explicitly given.
    if connect_kwargs is None:
        # TODO: is it better to pre-empt conflicts w/ manually-handled
        # connect() kwargs (hostname, username, etc) here or in open()?
        # We're doing open() for now in case e.g. someone manually modifies
        # .connect_kwargs attributewise, but otherwise it feels better to
        # do it early instead of late.
        connect_kwargs = self.config.connect_kwargs
    # Special case: key_filename gets merged instead of overridden.
    # TODO: probably want some sorta smart merging generally, special cases
    # are bad.
    elif "key_filename" in self.config.connect_kwargs:
        kwarg_val = connect_kwargs.get("key_filename", [])
        conf_val = self.config.connect_kwargs["key_filename"]
        # Config value comes before kwarg value (because it may contain
        # CLI flag value.)
        connect_kwargs["key_filename"] = conf_val + kwarg_val
    # SSH config identityfile values come last in the key_filename
    # 'hierarchy'.
    # NOTE(review): setdefault/extend below mutate the dict (and its list)
    # in place; when connect_kwargs was taken from self.config above, the
    # shared config value is modified too — confirm whether repeated
    # Connection construction should accumulate identityfile entries.
    if "identityfile" in self.ssh_config:
        connect_kwargs.setdefault("key_filename", [])
        connect_kwargs["key_filename"].extend(
            self.ssh_config["identityfile"]
        )
    return connect_kwargs
def get_gateway(self):
    """
    Derive this connection's gateway from SSH config or Invoke config.

    :returns:
        A `.Connection` chain (for ``ProxyJump``), a command string (for
        ``ProxyCommand``), or the config's ``gateway`` value (may be
        ``None``).
    """
    # SSH config wins over Invoke-style config
    if "proxyjump" in self.ssh_config:
        # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
        # with the final (itself non-gatewayed) hop and work up to
        # the front (actual, supplied as our own gateway) hop
        hops = reversed(self.ssh_config["proxyjump"].split(","))
        prev_gw = None
        for hop in hops:
            # Short-circuit if we appear to be our own proxy, which would
            # be a RecursionError. Implies SSH config wildcards.
            # TODO: in an ideal world we'd check user/port too in case they
            # differ, but...seriously? They can file a PR with those extra
            # half dozen test cases in play, E_NOTIME
            if self.derive_shorthand(hop)["host"] == self.host:
                return None
            # Happily, ProxyJump uses identical format to our host
            # shorthand...
            kwargs = dict(config=self.config.clone())
            if prev_gw is not None:
                kwargs["gateway"] = prev_gw
            # Each hop becomes a Connection gatewayed through the previous
            # (more remote) hop.
            cxn = Connection(hop, **kwargs)
            prev_gw = cxn
        return prev_gw
    elif "proxycommand" in self.ssh_config:
        # Just a string, which we interpret as a proxy command..
        return self.ssh_config["proxycommand"]
    # Fallback: config value (may be None).
    return self.config.gateway
def __repr__(self):
    """
    Return a compact representation showing non-default connection params.
    """
    # Host comes first as it's the most common differentiator by far.
    pairs = [("host", self.host)]
    # Only surface user/port when they differ from the config defaults.
    # TODO: maybe always show user regardless? Explicit is good...
    if self.user != self.config.user:
        pairs.append(("user", self.user))
    # TODO: harder to make case for 'always show port'; maybe if it's
    # non-22 (even if config has overridden the local default)?
    if self.port != self.config.port:
        pairs.append(("port", self.port))
    # NOTE: deliberately a plain truthiness test - self.gateway may be e.g.
    # False when a user explicitly disables a configured gateway, which
    # should display the same as "no gateway at all".
    if self.gateway:
        # Show only the gateway *type*; full params would be too verbose.
        if isinstance(self.gateway, string_types):
            kind = "proxycommand"
        else:
            kind = "proxyjump"
        pairs.append(("gw", kind))
    rendered = " ".join("{}={}".format(key, value) for key, value in pairs)
    return "<Connection {}>".format(rendered)
def _identity(self):
# TODO: consider including gateway and maybe even other init kwargs?
# Whether two cxns w/ same user/host/port but different
# gateway/keys/etc, should be considered "the same", is unclear.
return (self.host, self.user, self.port)
def __eq__(self, other):
if not isinstance(other, Connection):
return False
return self._identity() == other._identity()
def __lt__(self, other):
return self._identity() < other._identity()
def __hash__(self):
# NOTE: this departs from Context/DataProxy, which is not usefully
# hashable.
return hash(self._identity())
def derive_shorthand(self, host_string):
    """
    Split a ``[user@]host[:port]`` string into its component parts.

    :param str host_string: the (possibly shorthand-laden) host string.
    :returns:
        Dict with ``user``, ``host`` and ``port`` keys; missing pieces
        come back as ``None``. Strings containing more than one ``:``
        are treated as bare IPv6 addresses and never yield a port.
    """
    # Peel an optional "user@" prefix off the front (rightmost '@' wins,
    # matching ssh's own parsing).
    user = None
    remainder = host_string
    if "@" in remainder:
        user, _, remainder = remainder.rpartition("@")
        user = user or None
    # IPv6: can't reliably tell where addr ends and port begins, so don't
    # try (and don't bother adding special syntax either; callers should
    # use the explicit port= parameter instead).
    if remainder.count(":") > 1:
        host = remainder
        port = None
    # IPv4 / hostname: a single ':' reliably separates host from port.
    else:
        host, _, port_text = remainder.partition(":")
        host = host or None
        port = int(port_text) if port_text else None
    return {"user": user, "host": host, "port": port}
@property
def is_connected(self):
"""
Whether or not this connection is actually open.
.. versionadded:: 2.0
"""
return self.transport.active if self.transport else False
def open(self):
    """
    Initiate an SSH connection to the host/port this object is bound to.

    This may include activating the configured gateway connection, if one
    is set.

    Also saves a handle to the now-set Transport object for easier access.

    Various connect-time settings (and/or their corresponding :ref:`SSH
    config options <ssh-config>`) are utilized here in the call to
    `SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
    see :doc:`the configuration docs </concepts/configuration>`.)

    :raises ValueError:
        when ``connect_kwargs`` contains keys this method manages itself
        (``hostname``/``port``/``username``, or a conflicting ``timeout``).

    .. versionadded:: 2.0
    """
    # Short-circuit
    if self.is_connected:
        return
    err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!" # noqa
    # These may not be given, period
    for key in """
        hostname
        port
        username
    """.split():
        if key in self.connect_kwargs:
            raise ValueError(err.format(key))
    # These may be given one way or the other, but not both
    if (
        "timeout" in self.connect_kwargs
        and self.connect_timeout is not None
    ):
        raise ValueError(err.format("timeout"))
    # No conflicts -> merge 'em together
    kwargs = dict(
        self.connect_kwargs,
        username=self.user,
        hostname=self.host,
        port=self.port,
    )
    if self.gateway:
        kwargs["sock"] = self.open_gateway()
    if self.connect_timeout:
        kwargs["timeout"] = self.connect_timeout
    # Strip out empty defaults for less noisy debugging
    if "key_filename" in kwargs and not kwargs["key_filename"]:
        del kwargs["key_filename"]
    # Actually connect!
    self.client.connect(**kwargs)
    self.transport = self.client.get_transport()
def close(self):
"""
Terminate the network connection to the remote end, if open.
If no connection is open, this method does nothing.
.. versionadded:: 2.0
"""
if self.is_connected:
self.client.close()
if self.forward_agent and self._agent_handler is not None:
self._agent_handler.close()
def __enter__(self):
    # Context-manager entry: no implicit open() here; methods decorated
    # with @opens open the connection lazily on first use.
    return self

def __exit__(self, *exc):
    # Always close on block exit, including when an exception propagates.
    self.close()
@opens
def create_session(self):
    # Open a fresh session channel on the transport (which @opens has
    # just ensured is connected).
    channel = self.transport.open_session()
    if self.forward_agent:
        # Keep a handle so close() can tear the handler down later.
        self._agent_handler = AgentRequestHandler(channel)
    return channel
@opens
def run(self, command, **kwargs):
    """
    Execute a shell command on the remote end of this connection.

    This method wraps an SSH-capable implementation of
    `invoke.runners.Runner.run`; see its documentation for details.

    .. warning::
        There are a few spots where Fabric departs from Invoke's default
        settings/behaviors; they are documented under
        `.Config.global_defaults`.

    .. versionadded:: 2.0
    """
    # Build a remote-capable runner from config and delegate to the shared
    # Context machinery in one step.
    return self._run(self.config.runners.remote(self), command, **kwargs)
@opens
def sudo(self, command, **kwargs):
    """
    Execute a shell command, via ``sudo``, on the remote end.

    This method is identical to `invoke.context.Context.sudo` in every way,
    except in that -- like `run` -- it honors per-host/per-connection
    configuration overrides in addition to the generic/global ones. Thus,
    for example, per-host sudo passwords may be configured.

    .. versionadded:: 2.0
    """
    # Same pattern as run(): remote runner from config, then delegate.
    return self._sudo(self.config.runners.remote(self), command, **kwargs)
def local(self, *args, **kwargs):
    """
    Execute a shell command on the local system.

    This method is effectively a wrapper of `invoke.run`; see its docs for
    details and call signature.

    :returns: whatever the superclass ``run`` implementation returns.

    .. versionadded:: 2.0
    """
    # Superclass run() uses runners.local, so we can literally just call it
    # straight. (Connection.run itself is rebound to remote execution.)
    return super(Connection, self).run(*args, **kwargs)
@opens
def sftp(self):
    """
    Return a `~paramiko.sftp_client.SFTPClient` object.

    If called more than one time, memoizes the first result; thus, any
    given `.Connection` instance will only ever have a single SFTP client,
    and state (such as that managed by
    `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

    .. versionadded:: 2.0
    """
    # Lazily create, then cache, a single SFTP client per Connection.
    cached = self._sftp
    if cached is not None:
        return cached
    self._sftp = self.client.open_sftp()
    return self._sftp
def get(self, *args, **kwargs):
    """
    Get a remote file to the local filesystem or file-like object.

    Simply a wrapper for `.Transfer.get`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Delegate entirely to the Transfer helper bound to this connection.
    transfer = Transfer(self)
    return transfer.get(*args, **kwargs)
def put(self, *args, **kwargs):
    """
    Put a remote file (or file-like object) to the remote filesystem.

    Simply a wrapper for `.Transfer.put`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Delegate entirely to the Transfer helper bound to this connection.
    transfer = Transfer(self)
    return transfer.put(*args, **kwargs)
# TODO: yield the socket for advanced users? Other advanced use cases
# (perhaps factor out socket creation itself)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_local(
    self,
    local_port,
    remote_port=None,
    remote_host="localhost",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``local_port`` to the server's environment.

    For example, say you want to connect to a remote PostgreSQL database
    which is locked down and only accessible via the system it's running
    on. You have SSH access to this server, so you can temporarily make
    port 5432 on your local system act like port 5432 on the server::

        import psycopg2
        from fabric import Connection

        with Connection('my-db-server').forward_local(5432):
            db = psycopg2.connect(
                host='localhost', port=5432, database='mydb'
            )
            # Do things with 'db' here

    This method is analogous to using the ``-L`` option of OpenSSH's
    ``ssh`` program.

    :param int local_port: The local port number on which to listen.
    :param int remote_port:
        The remote port number. Defaults to the same value as
        ``local_port``.
    :param str local_host:
        The local hostname/interface on which to listen. Default:
        ``localhost``.
    :param str remote_host:
        The remote hostname serving the forwarded remote port. Default:
        ``localhost`` (i.e., the host this `.Connection` is connected to.)
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not remote_port:
        remote_port = local_port
    # TunnelManager does all of the work, sitting in the background (so we
    # can yield) and spawning threads every time somebody connects to our
    # local port.
    finished = Event()
    manager = TunnelManager(
        local_port=local_port,
        local_host=local_host,
        remote_port=remote_port,
        remote_host=remote_host,
        # TODO: not a huge fan of handing in our transport, but...?
        transport=self.transport,
        finished=finished,
    )
    manager.start()
    # Return control to caller now that things ought to be operational
    try:
        yield
    # Teardown once user exits block
    finally:
        # Signal to manager that it should close all open tunnels
        finished.set()
        # Then wait for it to do so
        manager.join()
        # Raise threading errors from within the manager, which would be
        # one of:
        # - an inner ThreadException, which was created by the manager on
        # behalf of its Tunnels; this gets directly raised.
        # - some other exception, which would thus have occurred in the
        # manager itself; we wrap this in a new ThreadException.
        # NOTE: in these cases, some of the metadata tracking in
        # ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
        # is useful when dealing with multiple nearly-identical sibling IO
        # threads) is superfluous, but it doesn't feel worth breaking
        # things up further; we just ignore it for now.
        wrapper = manager.exception()
        if wrapper is not None:
            if wrapper.type is ThreadException:
                raise wrapper.value
            else:
                raise ThreadException([wrapper])
        # TODO: cancel port forward on transport? Does that even make sense
        # here (where we used direct-tcpip) vs the opposite method (which
        # is what uses forward-tcpip)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``remote_port`` to the local environment.

    For example, say you're running a daemon in development mode on your
    workstation at port 8080, and want to funnel traffic to it from a
    production or staging environment.

    In most situations this isn't possible as your office/home network
    probably blocks inbound traffic. But you have SSH access to this
    server, so you can temporarily make port 8080 on that server act like
    port 8080 on your workstation::

        from fabric import Connection

        c = Connection('my-remote-server')
        with c.forward_remote(8080):
            c.run("remote-data-writer --port 8080")
            # Assuming remote-data-writer runs until interrupted, this will
            # stay open until you Ctrl-C...

    This method is analogous to using the ``-R`` option of OpenSSH's
    ``ssh`` program.

    :param int remote_port: The remote port number on which to listen.
    :param int local_port:
        The local port number. Defaults to the same value as
        ``remote_port``.
    :param str local_host:
        The local hostname/interface the forwarded connection talks to.
        Default: ``localhost``.
    :param str remote_host:
        The remote interface address to listen on when forwarding
        connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
        localhost).
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not local_port:
        local_port = remote_port
    # Callback executes on each connection to the remote port and is given
    # a Channel hooked up to said port. (We don't actually care about the
    # source/dest host/port pairs at all; only whether the channel has data
    # to read and suchlike.)
    # We then pair that channel with a new 'outbound' socket connection to
    # the local host/port being forwarded, in a new Tunnel.
    # That Tunnel is then added to a shared data structure so we can track
    # & close them during shutdown.
    #
    # TODO: this approach is less than ideal because we have to share state
    # between ourselves & the callback handed into the transport's own
    # thread handling (which is roughly analogous to our self-controlled
    # TunnelManager for local forwarding). See if we can use more of
    # Paramiko's API (or improve it and then do so) so that isn't
    # necessary.
    tunnels = []

    def callback(channel, src_addr_tup, dst_addr_tup):
        # Runs on Paramiko's transport thread for each inbound connection.
        sock = socket.socket()
        # TODO: handle connection failure such that channel, etc get closed
        sock.connect((local_host, local_port))
        # TODO: we don't actually need to generate the Events at our level,
        # do we? Just let Tunnel.__init__ do it; all we do is "press its
        # button" on shutdown...
        tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
        tunnel.start()
        # Communication between ourselves & the Paramiko handling subthread
        tunnels.append(tunnel)

    # Ask Paramiko (really, the remote sshd) to call our callback whenever
    # connections are established on the remote iface/port.
    # transport.request_port_forward(remote_host, remote_port, callback)
    try:
        self.transport.request_port_forward(
            address=remote_host, port=remote_port, handler=callback
        )
        yield
    finally:
        # TODO: see above re: lack of a TunnelManager
        # TODO: and/or also refactor with TunnelManager re: shutdown logic.
        # E.g. maybe have a non-thread TunnelManager-alike with a method
        # that acts as the callback? At least then there's a tiny bit more
        # encapsulation...meh.
        for tunnel in tunnels:
            tunnel.finished.set()
            tunnel.join()
        self.transport.cancel_port_forward(
            address=remote_host, port=remote_port
        )
|
fabric/fabric | fabric/connection.py | Connection.close | python | def close(self):
if self.is_connected:
self.client.close()
if self.forward_agent and self._agent_handler is not None:
self._agent_handler.close() | Terminate the network connection to the remote end, if open.
If no connection is open, this method does nothing.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/connection.py#L567-L578 | null | class Connection(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
* `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
* Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
* Connections do not always need to be explicitly closed; much of the
time, Paramiko's garbage collection hooks or Python's own shutdown
sequence will take care of things. **However**, should you encounter edge
cases (for example, sessions hanging on exit) it's helpful to explicitly
close connections when you're done with them.
This can be accomplished by manually calling `close`, or by using the
object as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None
original_host = None
user = None
port = None
ssh_config = None
gateway = None
forward_agent = None
connect_timeout = None
connect_kwargs = None
client = None
transport = None
_sftp = None
_agent_handler = None
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
self,
host,
user=None,
port=None,
config=None,
gateway=None,
forward_agent=None,
connect_timeout=None,
connect_kwargs=None,
):
"""
Set up a new object representing a server connection.
:param str host:
the hostname (or IP address) of this connection.
May include shorthand for the ``user`` and/or ``port`` parameters,
of the form ``user@host``, ``host:port``, or ``user@host:port``.
.. note::
Due to ambiguity, IPv6 host addresses are incompatible with the
``host:port`` shorthand (though ``user@host`` will still work
OK). In other words, the presence of >1 ``:`` character will
prevent any attempt to derive a shorthand port number; use the
explicit ``port`` parameter instead.
.. note::
If ``host`` matches a ``Host`` clause in loaded SSH config
data, and that ``Host`` clause contains a ``Hostname``
directive, the resulting `.Connection` object will behave as if
``host`` is equal to that ``Hostname`` value.
In all cases, the original value of ``host`` is preserved as
the ``original_host`` attribute.
Thus, given SSH config like so::
Host myalias
Hostname realhostname
a call like ``Connection(host='myalias')`` will result in an
object whose ``host`` attribute is ``realhostname``, and whose
``original_host`` attribute is ``myalias``.
:param str user:
the login user for the remote connection. Defaults to
``config.user``.
:param int port:
the remote port. Defaults to ``config.port``.
:param config:
configuration settings to use when executing methods on this
`.Connection` (e.g. default SSH port and so forth).
Should be a `.Config` or an `invoke.config.Config`
(which will be turned into a `.Config`).
Default is an anonymous `.Config` object.
:param gateway:
An object to use as a proxy or gateway for this connection.
This parameter accepts one of the following:
- another `.Connection` (for a ``ProxyJump`` style gateway);
- a shell command string (for a ``ProxyCommand`` style style
gateway).
Default: ``None``, meaning no gatewaying will occur (unless
otherwise configured; if one wants to override a configured gateway
at runtime, specify ``gateway=False``.)
.. seealso:: :ref:`ssh-gateways`
:param bool forward_agent:
Whether to enable SSH agent forwarding.
Default: ``config.forward_agent``.
:param int connect_timeout:
Connection timeout, in seconds.
Default: ``config.timeouts.connect``.
.. _connect_kwargs-arg:
:param dict connect_kwargs:
Keyword arguments handed verbatim to
`SSHClient.connect <paramiko.client.SSHClient.connect>` (when
`.open` is called).
`.Connection` tries not to grow additional settings/kwargs of its
own unless it is adding value of some kind; thus,
``connect_kwargs`` is currently the right place to hand in paramiko
connection parameters such as ``pkey`` or ``key_filename``. For
example::
c = Connection(
host="hostname",
user="admin",
connect_kwargs={
"key_filename": "/home/myuser/.ssh/private.key",
},
)
Default: ``config.connect_kwargs``.
:raises ValueError:
if user or port values are given via both ``host`` shorthand *and*
their own arguments. (We `refuse the temptation to guess`_).
.. _refuse the temptation to guess:
http://zen-of-python.info/
in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
"""
# NOTE: parent __init__ sets self._config; for now we simply overwrite
# that below. If it's somehow problematic we would want to break parent
# __init__ up in a manner that is more cleanly overrideable.
super(Connection, self).__init__(config=config)
#: The .Config object referenced when handling default values (for e.g.
#: user or port, when not explicitly given) or deciding how to behave.
if config is None:
config = Config()
# Handle 'vanilla' Invoke config objects, which need cloning 'into' one
# of our own Configs (which grants the new defaults, etc, while not
# squashing them if the Invoke-level config already accounted for them)
elif not isinstance(config, Config):
config = config.clone(into=Config)
self._set(_config=config)
# TODO: when/how to run load_files, merge, load_shell_env, etc?
# TODO: i.e. what is the lib use case here (and honestly in invoke too)
shorthand = self.derive_shorthand(host)
host = shorthand["host"]
err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa
if shorthand["user"] is not None:
if user is not None:
raise ValueError(err.format("user"))
user = shorthand["user"]
if shorthand["port"] is not None:
if port is not None:
raise ValueError(err.format("port"))
port = shorthand["port"]
# NOTE: we load SSH config data as early as possible as it has
# potential to affect nearly every other attribute.
#: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
self.ssh_config = self.config.base_ssh_config.lookup(host)
self.original_host = host
#: The hostname of the target server.
self.host = host
if "hostname" in self.ssh_config:
# TODO: log that this occurred?
self.host = self.ssh_config["hostname"]
#: The username this connection will use to connect to the remote end.
self.user = user or self.ssh_config.get("user", self.config.user)
# TODO: is it _ever_ possible to give an empty user value (e.g.
# user='')? E.g. do some SSH server specs allow for that?
#: The network port to connect on.
self.port = port or int(self.ssh_config.get("port", self.config.port))
# Gateway/proxy/bastion/jump setting: non-None values - string,
# Connection, even eg False - get set directly; None triggers seek in
# config/ssh_config
#: The gateway `.Connection` or ``ProxyCommand`` string to be used,
#: if any.
self.gateway = gateway if gateway is not None else self.get_gateway()
# NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
# the ProxyCommand subprocess at init time, vs open() time.
# TODO: make paramiko.proxy.ProxyCommand lazy instead?
if forward_agent is None:
# Default to config...
forward_agent = self.config.forward_agent
# But if ssh_config is present, it wins
if "forwardagent" in self.ssh_config:
# TODO: SSHConfig really, seriously needs some love here, god
map_ = {"yes": True, "no": False}
forward_agent = map_[self.ssh_config["forwardagent"]]
#: Whether agent forwarding is enabled.
self.forward_agent = forward_agent
if connect_timeout is None:
connect_timeout = self.ssh_config.get(
"connecttimeout", self.config.timeouts.connect
)
if connect_timeout is not None:
connect_timeout = int(connect_timeout)
#: Connection timeout
self.connect_timeout = connect_timeout
#: Keyword arguments given to `paramiko.client.SSHClient.connect` when
#: `open` is called.
self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
#: The `paramiko.client.SSHClient` instance this connection wraps.
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
self.client = client
#: A convenience handle onto the return value of
#: ``self.client.get_transport()``.
self.transport = None
def resolve_connect_kwargs(self, connect_kwargs):
    """
    Merge & resolve the kwargs eventually handed to ``SSHClient.connect``.

    ``key_filename`` entries are merged (not overridden) in this order of
    precedence: config value(s) first (they may contain CLI flag values),
    then the explicit ``connect_kwargs`` argument, then any SSH config
    ``IdentityFile`` entries.

    :param dict connect_kwargs:
        Explicitly given connect kwargs, or ``None`` to fall back to
        ``self.config.connect_kwargs``.
    :returns:
        A dict the caller may own & mutate freely: neither the argument,
        the config value, nor their ``key_filename`` lists are modified
        in place (previously the shared ``config.connect_kwargs`` could
        accumulate IdentityFile entries across Connection instances).
    """
    # Grab connect_kwargs from config if not explicitly given. Copy it so
    # the identityfile merge below cannot mutate the shared config object.
    if connect_kwargs is None:
        connect_kwargs = dict(self.config.connect_kwargs)
    # Special case: key_filename gets merged instead of overridden.
    # TODO: probably want some sorta smart merging generally, special cases
    # are bad.
    elif "key_filename" in self.config.connect_kwargs:
        # Copy instead of assigning into the caller's dict.
        connect_kwargs = dict(connect_kwargs)
        kwarg_val = connect_kwargs.get("key_filename", [])
        conf_val = self.config.connect_kwargs["key_filename"]
        # Config value comes before kwarg value (because it may contain
        # CLI flag value.)
        connect_kwargs["key_filename"] = list(conf_val) + list(kwarg_val)
    else:
        # Still copy, for a consistent "caller owns the result" contract.
        connect_kwargs = dict(connect_kwargs)
    # SSH config identityfile values come last in the key_filename
    # 'hierarchy'. Build a fresh list so no shared list is extended.
    if "identityfile" in self.ssh_config:
        merged = list(connect_kwargs.get("key_filename", []))
        merged.extend(self.ssh_config["identityfile"])
        connect_kwargs["key_filename"] = merged
    return connect_kwargs
def get_gateway(self):
    """
    Derive this connection's gateway from SSH config or Invoke config.

    :returns:
        One of: the head `.Connection` of a chain built from an SSH config
        ``ProxyJump`` directive; a ``ProxyCommand`` string from SSH config;
        or the (possibly ``None``) ``gateway`` config value.
    """
    # SSH config wins over Invoke-style config
    if "proxyjump" in self.ssh_config:
        # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
        # with the final (itself non-gatewayed) hop and work up to
        # the front (actual, supplied as our own gateway) hop
        hops = reversed(self.ssh_config["proxyjump"].split(","))
        prev_gw = None
        for hop in hops:
            # Short-circuit if we appear to be our own proxy, which would
            # be a RecursionError. Implies SSH config wildcards.
            # TODO: in an ideal world we'd check user/port too in case they
            # differ, but...seriously? They can file a PR with those extra
            # half dozen test cases in play, E_NOTIME
            if self.derive_shorthand(hop)["host"] == self.host:
                return None
            # Happily, ProxyJump uses identical format to our host
            # shorthand...
            kwargs = dict(config=self.config.clone())
            if prev_gw is not None:
                kwargs["gateway"] = prev_gw
            cxn = Connection(hop, **kwargs)
            prev_gw = cxn
        # prev_gw is now the first hop: the one we connect to directly.
        return prev_gw
    elif "proxycommand" in self.ssh_config:
        # Just a string, which we interpret as a proxy command..
        return self.ssh_config["proxycommand"]
    # Fallback: config value (may be None).
    return self.config.gateway
def __repr__(self):
# Host comes first as it's the most common differentiator by far
bits = [("host", self.host)]
# TODO: maybe always show user regardless? Explicit is good...
if self.user != self.config.user:
bits.append(("user", self.user))
# TODO: harder to make case for 'always show port'; maybe if it's
# non-22 (even if config has overridden the local default)?
if self.port != self.config.port:
bits.append(("port", self.port))
# NOTE: sometimes self.gateway may be eg False if someone wants to
# explicitly override a configured non-None value (as otherwise it's
# impossible for __init__ to tell if a None means "nothing given" or
# "seriously please no gatewaying". So, this must always be a vanilla
# truth test and not eg "is not None".
if self.gateway:
# Displaying type because gw params would probs be too verbose
val = "proxyjump"
if isinstance(self.gateway, string_types):
val = "proxycommand"
bits.append(("gw", val))
return "<Connection {}>".format(
" ".join("{}={}".format(*x) for x in bits)
)
def _identity(self):
    # The tuple backing __eq__/__lt__/__hash__: two connections with the
    # same host, user & port are treated as "the same" connection.
    # TODO: consider including gateway and maybe even other init kwargs?
    # Whether two cxns w/ same user/host/port but different
    # gateway/keys/etc, should be considered "the same", is unclear.
    return (self.host, self.user, self.port)
def __eq__(self, other):
    """Two Connections are equal when their (host, user, port) identities match."""
    # Anything that isn't a Connection never compares equal.
    return isinstance(other, Connection) and self._identity() == other._identity()
def __lt__(self, other):
    # Ordering is simply lexicographic comparison of the identity tuples:
    # host first, then user, then port.
    return self._identity() < other._identity()
def __hash__(self):
    # NOTE: this departs from Context/DataProxy, which is not usefully
    # hashable. Hash is derived from the same identity tuple as __eq__,
    # keeping the eq/hash contract consistent.
    return hash(self._identity())
def derive_shorthand(self, host_string):
    """
    Parse ``[user@]host[:port]`` shorthand into its components.

    :param str host_string: shorthand such as ``"admin@web1:2202"``.
    :returns:
        Dict with ``user``, ``host`` and ``port`` keys; absent pieces are
        ``None`` (``port``, when present, is converted to `int`).
    """
    # Split off an optional leading "user@" (rightmost @ wins, matching
    # usernames that themselves contain @).
    user = None
    remainder = host_string
    if "@" in host_string:
        user_part, remainder = host_string.rsplit("@", 1)
        user = user_part or None
    # IPv6: more than one colon means we can't reliably tell where the
    # address ends and a port begins, so treat the whole remainder as the
    # host (users should pass port= explicitly in that situation).
    if remainder.count(":") > 1:
        return {"user": user, "host": remainder, "port": None}
    # IPv4/hostname: a single trailing ":port" can be split off reliably.
    host, sep, port_part = remainder.rpartition(":")
    if not sep:
        # No colon at all: rpartition put everything in the last slot.
        host, port_part = port_part, ""
    return {
        "user": user,
        "host": host or None,
        "port": int(port_part) if port_part else None,
    }
@property
def is_connected(self):
    """
    Whether or not this connection is actually open.

    True only when a transport exists *and* reports itself active.

    .. versionadded:: 2.0
    """
    return self.transport.active if self.transport else False
def open(self):
    """
    Initiate an SSH connection to the host/port this object is bound to.

    This may include activating the configured gateway connection, if one
    is set.

    Also saves a handle to the now-set Transport object for easier access.

    Various connect-time settings (and/or their corresponding :ref:`SSH
    config options <ssh-config>`) are utilized here in the call to
    `SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
    see :doc:`the configuration docs </concepts/configuration>`.)

    :raises ValueError:
        if ``connect_kwargs`` duplicates a directly-managed setting
        (``hostname``/``port``/``username``, or ``timeout`` when
        ``connect_timeout`` is also set).

    .. versionadded:: 2.0
    """
    # Short-circuit
    if self.is_connected:
        return
    err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
    # These may not be given, period
    for key in """
        hostname
        port
        username
    """.split():
        if key in self.connect_kwargs:
            raise ValueError(err.format(key))
    # These may be given one way or the other, but not both
    if (
        "timeout" in self.connect_kwargs
        and self.connect_timeout is not None
    ):
        raise ValueError(err.format("timeout"))
    # No conflicts -> merge 'em together
    kwargs = dict(
        self.connect_kwargs,
        username=self.user,
        hostname=self.host,
        port=self.port,
    )
    if self.gateway:
        kwargs["sock"] = self.open_gateway()
    if self.connect_timeout:
        kwargs["timeout"] = self.connect_timeout
    # Strip out empty defaults for less noisy debugging
    if "key_filename" in kwargs and not kwargs["key_filename"]:
        del kwargs["key_filename"]
    # Actually connect!
    self.client.connect(**kwargs)
    self.transport = self.client.get_transport()
def open_gateway(self):
    """
    Obtain a socket-like object from `gateway`.

    :returns:
        A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
        `.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
        was a string.

    .. versionadded:: 2.0
    """
    # ProxyCommand is faster to set up, so do it first.
    if isinstance(self.gateway, string_types):
        # Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
        # TODO: use real SSH config once loading one properly is
        # implemented.
        ssh_conf = SSHConfig()
        dummy = "Host {}\n ProxyCommand {}"
        ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
        return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
    # Handle inner-Connection gateway type here.
    # TODO: logging
    self.gateway.open()
    # TODO: expose the opened channel itself as an attribute? (another
    # possible argument for separating the two gateway types...) e.g. if
    # someone wanted to piggyback on it for other same-interpreter socket
    # needs...
    # TODO: and the inverse? allow users to supply their own socket/like
    # object they got via $WHEREEVER?
    # TODO: how best to expose timeout param? reuse general connection
    # timeout from config?
    return self.gateway.transport.open_channel(
        kind="direct-tcpip",
        dest_addr=(self.host, int(self.port)),
        # NOTE: src_addr needs to be 'empty but not None' values to
        # correctly encode into a network message. Theoretically Paramiko
        # could auto-interpret None sometime & save us the trouble.
        src_addr=("", 0),
    )
def __enter__(self):
    # Context manager entry: hand back the Connection itself so
    # ``with Connection(...) as c:`` works.
    return self
def __exit__(self, *exc):
    # Context manager exit: always close, regardless of any exception.
    self.close()
@opens
def create_session(self):
    # Open a fresh session channel on the (auto-opened, via @opens)
    # transport. If agent forwarding is enabled, attach a handler so
    # remote processes can reach the local SSH agent over this channel.
    channel = self.transport.open_session()
    if self.forward_agent:
        self._agent_handler = AgentRequestHandler(channel)
    return channel
@opens
def run(self, command, **kwargs):
    """
    Execute a shell command on the remote end of this connection.

    This method wraps an SSH-capable implementation of
    `invoke.runners.Runner.run`; see its documentation for details.

    .. warning::
        There are a few spots where Fabric departs from Invoke's default
        settings/behaviors; they are documented under
        `.Config.global_defaults`.

    .. versionadded:: 2.0
    """
    # Instantiate a remote-capable Runner from config, then delegate to
    # Context's shared _run helper.
    runner = self.config.runners.remote(self)
    return self._run(runner, command, **kwargs)
@opens
def sudo(self, command, **kwargs):
    """
    Execute a shell command, via ``sudo``, on the remote end.

    This method is identical to `invoke.context.Context.sudo` in every way,
    except in that -- like `run` -- it honors per-host/per-connection
    configuration overrides in addition to the generic/global ones. Thus,
    for example, per-host sudo passwords may be configured.

    .. versionadded:: 2.0
    """
    # Same remote-runner delegation as run(), but via Context's _sudo.
    runner = self.config.runners.remote(self)
    return self._sudo(runner, command, **kwargs)
def local(self, *args, **kwargs):
    """
    Execute a shell command on the local system.

    This method is effectively a wrapper of `invoke.run`; see its docs for
    details and call signature.

    .. versionadded:: 2.0
    """
    # Superclass run() uses runners.local, so we can literally just call it
    # straight.
    return super(Connection, self).run(*args, **kwargs)
@opens
def sftp(self):
    """
    Return a `~paramiko.sftp_client.SFTPClient` object.

    If called more than one time, memoizes the first result; thus, any
    given `.Connection` instance will only ever have a single SFTP client,
    and state (such as that managed by
    `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

    .. versionadded:: 2.0
    """
    # Lazily create & cache the client on first use.
    if self._sftp is None:
        self._sftp = self.client.open_sftp()
    return self._sftp
def get(self, *args, **kwargs):
    """
    Get a remote file to the local filesystem or file-like object.

    Simply a wrapper for `.Transfer.get`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Delegate to a fresh Transfer helper bound to this connection.
    return Transfer(self).get(*args, **kwargs)
def put(self, *args, **kwargs):
    """
    Put a remote file (or file-like object) to the remote filesystem.

    Simply a wrapper for `.Transfer.put`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Delegate to a fresh Transfer helper bound to this connection.
    return Transfer(self).put(*args, **kwargs)
# TODO: yield the socket for advanced users? Other advanced use cases
# (perhaps factor out socket creation itself)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_local(
    self,
    local_port,
    remote_port=None,
    remote_host="localhost",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``local_port`` to the server's environment.

    For example, say you want to connect to a remote PostgreSQL database
    which is locked down and only accessible via the system it's running
    on. You have SSH access to this server, so you can temporarily make
    port 5432 on your local system act like port 5432 on the server::

        import psycopg2
        from fabric import Connection

        with Connection('my-db-server').forward_local(5432):
            db = psycopg2.connect(
                host='localhost', port=5432, database='mydb'
            )
            # Do things with 'db' here

    This method is analogous to using the ``-L`` option of OpenSSH's
    ``ssh`` program.

    :param int local_port: The local port number on which to listen.
    :param int remote_port:
        The remote port number. Defaults to the same value as
        ``local_port``.
    :param str local_host:
        The local hostname/interface on which to listen. Default:
        ``localhost``.
    :param str remote_host:
        The remote hostname serving the forwarded remote port. Default:
        ``localhost`` (i.e., the host this `.Connection` is connected to.)
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not remote_port:
        remote_port = local_port
    # TunnelManager does all of the work, sitting in the background (so we
    # can yield) and spawning threads every time somebody connects to our
    # local port.
    finished = Event()
    manager = TunnelManager(
        local_port=local_port,
        local_host=local_host,
        remote_port=remote_port,
        remote_host=remote_host,
        # TODO: not a huge fan of handing in our transport, but...?
        transport=self.transport,
        finished=finished,
    )
    manager.start()
    # Return control to caller now that things ought to be operational
    try:
        yield
    # Teardown once user exits block
    finally:
        # Signal to manager that it should close all open tunnels
        finished.set()
        # Then wait for it to do so
        manager.join()
        # Raise threading errors from within the manager, which would be
        # one of:
        # - an inner ThreadException, which was created by the manager on
        # behalf of its Tunnels; this gets directly raised.
        # - some other exception, which would thus have occurred in the
        # manager itself; we wrap this in a new ThreadException.
        # NOTE: in these cases, some of the metadata tracking in
        # ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
        # is useful when dealing with multiple nearly-identical sibling IO
        # threads) is superfluous, but it doesn't feel worth breaking
        # things up further; we just ignore it for now.
        wrapper = manager.exception()
        if wrapper is not None:
            if wrapper.type is ThreadException:
                raise wrapper.value
            else:
                raise ThreadException([wrapper])
        # TODO: cancel port forward on transport? Does that even make sense
        # here (where we used direct-tcpip) vs the opposite method (which
        # is what uses forward-tcpip)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``remote_port`` to the local environment.

    For example, say you're running a daemon in development mode on your
    workstation at port 8080, and want to funnel traffic to it from a
    production or staging environment.

    In most situations this isn't possible as your office/home network
    probably blocks inbound traffic. But you have SSH access to this
    server, so you can temporarily make port 8080 on that server act like
    port 8080 on your workstation::

        from fabric import Connection

        c = Connection('my-remote-server')
        with c.forward_remote(8080):
            c.run("remote-data-writer --port 8080")
            # Assuming remote-data-writer runs until interrupted, this will
            # stay open until you Ctrl-C...

    This method is analogous to using the ``-R`` option of OpenSSH's
    ``ssh`` program.

    :param int remote_port: The remote port number on which to listen.
    :param int local_port:
        The local port number. Defaults to the same value as
        ``remote_port``.
    :param str local_host:
        The local hostname/interface the forwarded connection talks to.
        Default: ``localhost``.
    :param str remote_host:
        The remote interface address to listen on when forwarding
        connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
        localhost).
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    if not local_port:
        local_port = remote_port
    # Callback executes on each connection to the remote port and is given
    # a Channel hooked up to said port. (We don't actually care about the
    # source/dest host/port pairs at all; only whether the channel has data
    # to read and suchlike.)
    # We then pair that channel with a new 'outbound' socket connection to
    # the local host/port being forwarded, in a new Tunnel.
    # That Tunnel is then added to a shared data structure so we can track
    # & close them during shutdown.
    #
    # TODO: this approach is less than ideal because we have to share state
    # between ourselves & the callback handed into the transport's own
    # thread handling (which is roughly analogous to our self-controlled
    # TunnelManager for local forwarding). See if we can use more of
    # Paramiko's API (or improve it and then do so) so that isn't
    # necessary.
    tunnels = []

    def callback(channel, src_addr_tup, dst_addr_tup):
        sock = socket.socket()
        # TODO: handle connection failure such that channel, etc get closed
        sock.connect((local_host, local_port))
        # TODO: we don't actually need to generate the Events at our level,
        # do we? Just let Tunnel.__init__ do it; all we do is "press its
        # button" on shutdown...
        tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
        tunnel.start()
        # Communication between ourselves & the Paramiko handling subthread
        tunnels.append(tunnel)

    # Ask Paramiko (really, the remote sshd) to call our callback whenever
    # connections are established on the remote iface/port.
    # transport.request_port_forward(remote_host, remote_port, callback)
    try:
        self.transport.request_port_forward(
            address=remote_host, port=remote_port, handler=callback
        )
        yield
    finally:
        # TODO: see above re: lack of a TunnelManager
        # TODO: and/or also refactor with TunnelManager re: shutdown logic.
        # E.g. maybe have a non-thread TunnelManager-alike with a method
        # that acts as the callback? At least then there's a tiny bit more
        # encapsulation...meh.
        for tunnel in tunnels:
            tunnel.finished.set()
            tunnel.join()
        self.transport.cancel_port_forward(
            address=remote_host, port=remote_port
        )
|
# --- Dataset record metadata (extraction artifact; columns un-fused for readability) ---
# repository_name: fabric/fabric
# func_path_in_repository: fabric/connection.py
# func_name: Connection.run
# language: python
# split_name: train
# func_code_url: https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/connection.py#L594-L609
# called_functions: null
#
# func_code_string:
#     def run(self, command, **kwargs):
#         runner = self.config.runners.remote(self)
#         return self._run(runner, command, **kwargs)
#
# func_documentation_string:
#     Execute a shell command on the remote end of this connection.
#     This method wraps an SSH-capable implementation of
#     `invoke.runners.Runner.run`; see its documentation for details.
#     .. warning::
#         There are a few spots where Fabric departs from Invoke's default
#         settings/behaviors; they are documented under
#         `.Config.global_defaults`.
#     .. versionadded:: 2.0
#
# enclosing_scope begins here:
class Connection(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
* `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
* Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
* Connections do not always need to be explicitly closed; much of the
time, Paramiko's garbage collection hooks or Python's own shutdown
sequence will take care of things. **However**, should you encounter edge
cases (for example, sessions hanging on exit) it's helpful to explicitly
close connections when you're done with them.
This can be accomplished by manually calling `close`, or by using the
object as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None            # resolved target hostname (post ssh_config Hostname lookup)
original_host = None   # the host string exactly as originally given
user = None            # login username for the remote end
port = None            # remote TCP port
ssh_config = None      # per-host SSH config data dict
gateway = None         # Connection, ProxyCommand string, None, or False
forward_agent = None   # whether SSH agent forwarding is enabled
connect_timeout = None # connection timeout, in seconds
connect_kwargs = None  # extra kwargs for SSHClient.connect (see open())
client = None          # the wrapped paramiko.client.SSHClient
transport = None       # handle on client.get_transport(), set by open()
_sftp = None           # memoized SFTPClient (see sftp())
_agent_handler = None  # AgentRequestHandler when agent forwarding is active
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
    self,
    host,
    user=None,
    port=None,
    config=None,
    gateway=None,
    forward_agent=None,
    connect_timeout=None,
    connect_kwargs=None,
):
    """
    Set up a new object representing a server connection.

    :param str host:
        the hostname (or IP address) of this connection.

        May include shorthand for the ``user`` and/or ``port`` parameters,
        of the form ``user@host``, ``host:port``, or ``user@host:port``.

        .. note::
            Due to ambiguity, IPv6 host addresses are incompatible with the
            ``host:port`` shorthand (though ``user@host`` will still work
            OK). In other words, the presence of >1 ``:`` character will
            prevent any attempt to derive a shorthand port number; use the
            explicit ``port`` parameter instead.

        .. note::
            If ``host`` matches a ``Host`` clause in loaded SSH config
            data, and that ``Host`` clause contains a ``Hostname``
            directive, the resulting `.Connection` object will behave as if
            ``host`` is equal to that ``Hostname`` value.

            In all cases, the original value of ``host`` is preserved as
            the ``original_host`` attribute.

            Thus, given SSH config like so::

                Host myalias
                    Hostname realhostname

            a call like ``Connection(host='myalias')`` will result in an
            object whose ``host`` attribute is ``realhostname``, and whose
            ``original_host`` attribute is ``myalias``.

    :param str user:
        the login user for the remote connection. Defaults to
        ``config.user``.

    :param int port:
        the remote port. Defaults to ``config.port``.

    :param config:
        configuration settings to use when executing methods on this
        `.Connection` (e.g. default SSH port and so forth).

        Should be a `.Config` or an `invoke.config.Config`
        (which will be turned into a `.Config`).

        Default is an anonymous `.Config` object.

    :param gateway:
        An object to use as a proxy or gateway for this connection.

        This parameter accepts one of the following:

        - another `.Connection` (for a ``ProxyJump`` style gateway);
        - a shell command string (for a ``ProxyCommand`` style gateway).

        Default: ``None``, meaning no gatewaying will occur (unless
        otherwise configured; if one wants to override a configured gateway
        at runtime, specify ``gateway=False``.)

        .. seealso:: :ref:`ssh-gateways`

    :param bool forward_agent:
        Whether to enable SSH agent forwarding.

        Default: ``config.forward_agent``.

    :param int connect_timeout:
        Connection timeout, in seconds.

        Default: ``config.timeouts.connect``.

    .. _connect_kwargs-arg:

    :param dict connect_kwargs:
        Keyword arguments handed verbatim to
        `SSHClient.connect <paramiko.client.SSHClient.connect>` (when
        `.open` is called).

        `.Connection` tries not to grow additional settings/kwargs of its
        own unless it is adding value of some kind; thus,
        ``connect_kwargs`` is currently the right place to hand in paramiko
        connection parameters such as ``pkey`` or ``key_filename``. For
        example::

            c = Connection(
                host="hostname",
                user="admin",
                connect_kwargs={
                    "key_filename": "/home/myuser/.ssh/private.key",
                },
            )

        Default: ``config.connect_kwargs``.

    :raises ValueError:
        if user or port values are given via both ``host`` shorthand *and*
        their own arguments. (We `refuse the temptation to guess`_).

    .. _refuse the temptation to guess:
        http://zen-of-python.info/in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
    """
    # NOTE: parent __init__ sets self._config; for now we simply overwrite
    # that below. If it's somehow problematic we would want to break parent
    # __init__ up in a manner that is more cleanly overrideable.
    super(Connection, self).__init__(config=config)
    #: The .Config object referenced when handling default values (for e.g.
    #: user or port, when not explicitly given) or deciding how to behave.
    if config is None:
        config = Config()
    # Handle 'vanilla' Invoke config objects, which need cloning 'into' one
    # of our own Configs (which grants the new defaults, etc, while not
    # squashing them if the Invoke-level config already accounted for them)
    elif not isinstance(config, Config):
        config = config.clone(into=Config)
    self._set(_config=config)
    # TODO: when/how to run load_files, merge, load_shell_env, etc?
    # TODO: i.e. what is the lib use case here (and honestly in invoke too)
    shorthand = self.derive_shorthand(host)
    host = shorthand["host"]
    err = "You supplied the {} via both shorthand and kwarg! Please pick one."  # noqa
    if shorthand["user"] is not None:
        if user is not None:
            raise ValueError(err.format("user"))
        user = shorthand["user"]
    if shorthand["port"] is not None:
        if port is not None:
            raise ValueError(err.format("port"))
        port = shorthand["port"]
    # NOTE: we load SSH config data as early as possible as it has
    # potential to affect nearly every other attribute.
    #: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
    self.ssh_config = self.config.base_ssh_config.lookup(host)
    self.original_host = host
    #: The hostname of the target server.
    self.host = host
    if "hostname" in self.ssh_config:
        # TODO: log that this occurred?
        self.host = self.ssh_config["hostname"]
    #: The username this connection will use to connect to the remote end.
    self.user = user or self.ssh_config.get("user", self.config.user)
    # TODO: is it _ever_ possible to give an empty user value (e.g.
    # user='')? E.g. do some SSH server specs allow for that?
    #: The network port to connect on.
    self.port = port or int(self.ssh_config.get("port", self.config.port))
    # Gateway/proxy/bastion/jump setting: non-None values - string,
    # Connection, even eg False - get set directly; None triggers seek in
    # config/ssh_config
    #: The gateway `.Connection` or ``ProxyCommand`` string to be used,
    #: if any.
    self.gateway = gateway if gateway is not None else self.get_gateway()
    # NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
    # the ProxyCommand subprocess at init time, vs open() time.
    # TODO: make paramiko.proxy.ProxyCommand lazy instead?
    if forward_agent is None:
        # Default to config...
        forward_agent = self.config.forward_agent
        # But if ssh_config is present, it wins
        if "forwardagent" in self.ssh_config:
            # TODO: SSHConfig really, seriously needs some love here, god
            map_ = {"yes": True, "no": False}
            forward_agent = map_[self.ssh_config["forwardagent"]]
    #: Whether agent forwarding is enabled.
    self.forward_agent = forward_agent
    if connect_timeout is None:
        connect_timeout = self.ssh_config.get(
            "connecttimeout", self.config.timeouts.connect
        )
    if connect_timeout is not None:
        connect_timeout = int(connect_timeout)
    #: Connection timeout
    self.connect_timeout = connect_timeout
    #: Keyword arguments given to `paramiko.client.SSHClient.connect` when
    #: `open` is called.
    self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
    #: The `paramiko.client.SSHClient` instance this connection wraps.
    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy())
    self.client = client
    #: A convenience handle onto the return value of
    #: ``self.client.get_transport()``.
    self.transport = None
def resolve_connect_kwargs(self, connect_kwargs):
    """
    Merge & resolve the kwargs eventually handed to ``SSHClient.connect``.

    ``key_filename`` entries are merged (not overridden) in this order of
    precedence: config value(s) first (they may contain CLI flag values),
    then the explicit ``connect_kwargs`` argument, then any SSH config
    ``IdentityFile`` entries.

    :param dict connect_kwargs:
        Explicitly given connect kwargs, or ``None`` to fall back to
        ``self.config.connect_kwargs``.
    :returns:
        A dict the caller may own & mutate freely: neither the argument,
        the config value, nor their ``key_filename`` lists are modified
        in place (previously the shared ``config.connect_kwargs`` could
        accumulate IdentityFile entries across Connection instances).
    """
    # Grab connect_kwargs from config if not explicitly given. Copy it so
    # the identityfile merge below cannot mutate the shared config object.
    if connect_kwargs is None:
        connect_kwargs = dict(self.config.connect_kwargs)
    # Special case: key_filename gets merged instead of overridden.
    # TODO: probably want some sorta smart merging generally, special cases
    # are bad.
    elif "key_filename" in self.config.connect_kwargs:
        # Copy instead of assigning into the caller's dict.
        connect_kwargs = dict(connect_kwargs)
        kwarg_val = connect_kwargs.get("key_filename", [])
        conf_val = self.config.connect_kwargs["key_filename"]
        # Config value comes before kwarg value (because it may contain
        # CLI flag value.)
        connect_kwargs["key_filename"] = list(conf_val) + list(kwarg_val)
    else:
        # Still copy, for a consistent "caller owns the result" contract.
        connect_kwargs = dict(connect_kwargs)
    # SSH config identityfile values come last in the key_filename
    # 'hierarchy'. Build a fresh list so no shared list is extended.
    if "identityfile" in self.ssh_config:
        merged = list(connect_kwargs.get("key_filename", []))
        merged.extend(self.ssh_config["identityfile"])
        connect_kwargs["key_filename"] = merged
    return connect_kwargs
def get_gateway(self):
    """
    Derive this connection's gateway from SSH config or Invoke config.

    :returns:
        One of: the head `.Connection` of a chain built from an SSH config
        ``ProxyJump`` directive; a ``ProxyCommand`` string from SSH config;
        or the (possibly ``None``) ``gateway`` config value.
    """
    # SSH config wins over Invoke-style config
    if "proxyjump" in self.ssh_config:
        # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
        # with the final (itself non-gatewayed) hop and work up to
        # the front (actual, supplied as our own gateway) hop
        hops = reversed(self.ssh_config["proxyjump"].split(","))
        prev_gw = None
        for hop in hops:
            # Short-circuit if we appear to be our own proxy, which would
            # be a RecursionError. Implies SSH config wildcards.
            # TODO: in an ideal world we'd check user/port too in case they
            # differ, but...seriously? They can file a PR with those extra
            # half dozen test cases in play, E_NOTIME
            if self.derive_shorthand(hop)["host"] == self.host:
                return None
            # Happily, ProxyJump uses identical format to our host
            # shorthand...
            kwargs = dict(config=self.config.clone())
            if prev_gw is not None:
                kwargs["gateway"] = prev_gw
            cxn = Connection(hop, **kwargs)
            prev_gw = cxn
        # prev_gw is now the first hop: the one we connect to directly.
        return prev_gw
    elif "proxycommand" in self.ssh_config:
        # Just a string, which we interpret as a proxy command..
        return self.ssh_config["proxycommand"]
    # Fallback: config value (may be None).
    return self.config.gateway
def __repr__(self):
# Host comes first as it's the most common differentiator by far
bits = [("host", self.host)]
# TODO: maybe always show user regardless? Explicit is good...
if self.user != self.config.user:
bits.append(("user", self.user))
# TODO: harder to make case for 'always show port'; maybe if it's
# non-22 (even if config has overridden the local default)?
if self.port != self.config.port:
bits.append(("port", self.port))
# NOTE: sometimes self.gateway may be eg False if someone wants to
# explicitly override a configured non-None value (as otherwise it's
# impossible for __init__ to tell if a None means "nothing given" or
# "seriously please no gatewaying". So, this must always be a vanilla
# truth test and not eg "is not None".
if self.gateway:
# Displaying type because gw params would probs be too verbose
val = "proxyjump"
if isinstance(self.gateway, string_types):
val = "proxycommand"
bits.append(("gw", val))
return "<Connection {}>".format(
" ".join("{}={}".format(*x) for x in bits)
)
def _identity(self):
    # The tuple backing __eq__/__lt__/__hash__: two connections with the
    # same host, user & port are treated as "the same" connection.
    # TODO: consider including gateway and maybe even other init kwargs?
    # Whether two cxns w/ same user/host/port but different
    # gateway/keys/etc, should be considered "the same", is unclear.
    return (self.host, self.user, self.port)
def __eq__(self, other):
    """Two Connections are equal when their (host, user, port) identities match."""
    # Anything that isn't a Connection never compares equal.
    return isinstance(other, Connection) and self._identity() == other._identity()
def __lt__(self, other):
return self._identity() < other._identity()
def __hash__(self):
# NOTE: this departs from Context/DataProxy, which is not usefully
# hashable.
return hash(self._identity())
def derive_shorthand(self, host_string):
user_hostport = host_string.rsplit("@", 1)
hostport = user_hostport.pop()
user = user_hostport[0] if user_hostport and user_hostport[0] else None
# IPv6: can't reliably tell where addr ends and port begins, so don't
# try (and don't bother adding special syntax either, user should avoid
# this situation by using port=).
if hostport.count(":") > 1:
host = hostport
port = None
# IPv4: can split on ':' reliably.
else:
host_port = hostport.rsplit(":", 1)
host = host_port.pop(0) or None
port = host_port[0] if host_port and host_port[0] else None
if port is not None:
port = int(port)
return {"user": user, "host": host, "port": port}
@property
def is_connected(self):
"""
Whether or not this connection is actually open.
.. versionadded:: 2.0
"""
return self.transport.active if self.transport else False
    def open(self):
        """
        Initiate an SSH connection to the host/port this object is bound to.

        This may include activating the configured gateway connection, if one
        is set.

        Also saves a handle to the now-set Transport object for easier access.

        Various connect-time settings (and/or their corresponding :ref:`SSH
        config options <ssh-config>`) are utilized here in the call to
        `SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
        see :doc:`the configuration docs </concepts/configuration>`.)

        .. versionadded:: 2.0
        """
        # Short-circuit
        if self.is_connected:
            return
        err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
        # These may not be given, period
        for key in """
            hostname
            port
            username
        """.split():
            if key in self.connect_kwargs:
                raise ValueError(err.format(key))
        # These may be given one way or the other, but not both
        if (
            "timeout" in self.connect_kwargs
            and self.connect_timeout is not None
        ):
            raise ValueError(err.format("timeout"))
        # No conflicts -> merge 'em together; explicit user/host/port win by
        # virtue of being keyword overrides on top of connect_kwargs.
        kwargs = dict(
            self.connect_kwargs,
            username=self.user,
            hostname=self.host,
            port=self.port,
        )
        if self.gateway:
            # Gateway socket/channel must be live before connect() is called.
            kwargs["sock"] = self.open_gateway()
        if self.connect_timeout:
            kwargs["timeout"] = self.connect_timeout
        # Strip out empty defaults for less noisy debugging
        if "key_filename" in kwargs and not kwargs["key_filename"]:
            del kwargs["key_filename"]
        # Actually connect!
        self.client.connect(**kwargs)
        self.transport = self.client.get_transport()
    def open_gateway(self):
        """
        Obtain a socket-like object from `gateway`.

        :returns:
            A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
            `.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
            was a string.

        .. versionadded:: 2.0
        """
        # ProxyCommand is faster to set up, so do it first.
        if isinstance(self.gateway, string_types):
            # Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
            # TODO: use real SSH config once loading one properly is
            # implemented.
            ssh_conf = SSHConfig()
            dummy = "Host {}\n ProxyCommand {}"
            ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
            return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
        # Handle inner-Connection gateway type here: open the gateway's own
        # connection, then tunnel through it.
        # TODO: logging
        self.gateway.open()
        # TODO: expose the opened channel itself as an attribute? (another
        # possible argument for separating the two gateway types...) e.g. if
        # someone wanted to piggyback on it for other same-interpreter socket
        # needs...
        # TODO: and the inverse? allow users to supply their own socket/like
        # object they got via $WHEREEVER?
        # TODO: how best to expose timeout param? reuse general connection
        # timeout from config?
        return self.gateway.transport.open_channel(
            kind="direct-tcpip",
            dest_addr=(self.host, int(self.port)),
            # NOTE: src_addr needs to be 'empty but not None' values to
            # correctly encode into a network message. Theoretically Paramiko
            # could auto-interpret None sometime & save us the trouble.
            src_addr=("", 0),
        )
def close(self):
"""
Terminate the network connection to the remote end, if open.
If no connection is open, this method does nothing.
.. versionadded:: 2.0
"""
if self.is_connected:
self.client.close()
if self.forward_agent and self._agent_handler is not None:
self._agent_handler.close()
    def __enter__(self):
        # Context-manager entry: no implicit open(); individual methods open
        # the connection on demand via the @opens decorator.
        return self

    def __exit__(self, *exc):
        # Always close the connection on block exit, even on exceptions.
        self.close()
    @opens
    def create_session(self):
        # Open a fresh session channel on the (now-open) transport; when
        # agent forwarding is enabled, attach a handler so remote processes
        # can reach the local SSH agent through this channel.
        channel = self.transport.open_session()
        if self.forward_agent:
            self._agent_handler = AgentRequestHandler(channel)
        return channel
@opens
@opens
def sudo(self, command, **kwargs):
"""
Execute a shell command, via ``sudo``, on the remote end.
This method is identical to `invoke.context.Context.sudo` in every way,
except in that -- like `run` -- it honors per-host/per-connection
configuration overrides in addition to the generic/global ones. Thus,
for example, per-host sudo passwords may be configured.
.. versionadded:: 2.0
"""
runner = self.config.runners.remote(self)
return self._sudo(runner, command, **kwargs)
    def local(self, *args, **kwargs):
        """
        Execute a shell command on the local system.

        This method is effectively a wrapper of `invoke.run`; see its docs for
        details and call signature.

        .. versionadded:: 2.0
        """
        # Superclass run() uses runners.local, so we can literally just call it
        # straight. (Note: no @opens here -- local execution does not need the
        # SSH transport.)
        return super(Connection, self).run(*args, **kwargs)
    @opens
    def sftp(self):
        """
        Return a `~paramiko.sftp_client.SFTPClient` object.

        If called more than one time, memoizes the first result; thus, any
        given `.Connection` instance will only ever have a single SFTP client,
        and state (such as that managed by
        `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

        .. versionadded:: 2.0
        """
        # Lazily create & cache the SFTP client on first use.
        if self._sftp is None:
            self._sftp = self.client.open_sftp()
        return self._sftp
def get(self, *args, **kwargs):
"""
Get a remote file to the local filesystem or file-like object.
Simply a wrapper for `.Transfer.get`. Please see its documentation for
all details.
.. versionadded:: 2.0
"""
return Transfer(self).get(*args, **kwargs)
def put(self, *args, **kwargs):
"""
Put a remote file (or file-like object) to the remote filesystem.
Simply a wrapper for `.Transfer.put`. Please see its documentation for
all details.
.. versionadded:: 2.0
"""
return Transfer(self).put(*args, **kwargs)
    # TODO: yield the socket for advanced users? Other advanced use cases
    # (perhaps factor out socket creation itself)?
    # TODO: probably push some of this down into Paramiko
    @contextmanager
    @opens
    def forward_local(
        self,
        local_port,
        remote_port=None,
        remote_host="localhost",
        local_host="localhost",
    ):
        """
        Open a tunnel connecting ``local_port`` to the server's environment.

        For example, say you want to connect to a remote PostgreSQL database
        which is locked down and only accessible via the system it's running
        on. You have SSH access to this server, so you can temporarily make
        port 5432 on your local system act like port 5432 on the server::

            import psycopg2
            from fabric import Connection

            with Connection('my-db-server').forward_local(5432):
                db = psycopg2.connect(
                    host='localhost', port=5432, database='mydb'
                )
                # Do things with 'db' here

        This method is analogous to using the ``-L`` option of OpenSSH's
        ``ssh`` program.

        :param int local_port: The local port number on which to listen.

        :param int remote_port:
            The remote port number. Defaults to the same value as
            ``local_port``.

        :param str local_host:
            The local hostname/interface on which to listen. Default:
            ``localhost``.

        :param str remote_host:
            The remote hostname serving the forwarded remote port. Default:
            ``localhost`` (i.e., the host this `.Connection` is connected to.)

        :returns:
            Nothing; this method is only useful as a context manager affecting
            local operating system state.

        .. versionadded:: 2.0
        """
        if not remote_port:
            remote_port = local_port
        # TunnelManager does all of the work, sitting in the background (so we
        # can yield) and spawning threads every time somebody connects to our
        # local port.
        finished = Event()
        manager = TunnelManager(
            local_port=local_port,
            local_host=local_host,
            remote_port=remote_port,
            remote_host=remote_host,
            # TODO: not a huge fan of handing in our transport, but...?
            transport=self.transport,
            finished=finished,
        )
        manager.start()
        # Return control to caller now that things ought to be operational
        try:
            yield
        # Teardown once user exits block
        finally:
            # Signal to manager that it should close all open tunnels
            finished.set()
            # Then wait for it to do so
            manager.join()
            # Raise threading errors from within the manager, which would be
            # one of:
            # - an inner ThreadException, which was created by the manager on
            # behalf of its Tunnels; this gets directly raised.
            # - some other exception, which would thus have occurred in the
            # manager itself; we wrap this in a new ThreadException.
            # NOTE: in these cases, some of the metadata tracking in
            # ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
            # is useful when dealing with multiple nearly-identical sibling IO
            # threads) is superfluous, but it doesn't feel worth breaking
            # things up further; we just ignore it for now.
            wrapper = manager.exception()
            if wrapper is not None:
                if wrapper.type is ThreadException:
                    raise wrapper.value
                else:
                    raise ThreadException([wrapper])
    # TODO: cancel port forward on transport? Does that even make sense
    # here (where we used direct-tcpip) vs the opposite method (which
    # is what uses forward-tcpip)?
    # TODO: probably push some of this down into Paramiko
    @contextmanager
    @opens
    def forward_remote(
        self,
        remote_port,
        local_port=None,
        remote_host="127.0.0.1",
        local_host="localhost",
    ):
        """
        Open a tunnel connecting ``remote_port`` to the local environment.

        For example, say you're running a daemon in development mode on your
        workstation at port 8080, and want to funnel traffic to it from a
        production or staging environment.

        In most situations this isn't possible as your office/home network
        probably blocks inbound traffic. But you have SSH access to this
        server, so you can temporarily make port 8080 on that server act like
        port 8080 on your workstation::

            from fabric import Connection

            c = Connection('my-remote-server')
            with c.forward_remote(8080):
                c.run("remote-data-writer --port 8080")
                # Assuming remote-data-writer runs until interrupted, this will
                # stay open until you Ctrl-C...

        This method is analogous to using the ``-R`` option of OpenSSH's
        ``ssh`` program.

        :param int remote_port: The remote port number on which to listen.

        :param int local_port:
            The local port number. Defaults to the same value as
            ``remote_port``.

        :param str local_host:
            The local hostname/interface the forwarded connection talks to.
            Default: ``localhost``.

        :param str remote_host:
            The remote interface address to listen on when forwarding
            connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
            localhost).

        :returns:
            Nothing; this method is only useful as a context manager affecting
            local operating system state.

        .. versionadded:: 2.0
        """
        if not local_port:
            local_port = remote_port
        # Callback executes on each connection to the remote port and is given
        # a Channel hooked up to said port. (We don't actually care about the
        # source/dest host/port pairs at all; only whether the channel has data
        # to read and suchlike.)
        # We then pair that channel with a new 'outbound' socket connection to
        # the local host/port being forwarded, in a new Tunnel.
        # That Tunnel is then added to a shared data structure so we can track
        # & close them during shutdown.
        #
        # TODO: this approach is less than ideal because we have to share state
        # between ourselves & the callback handed into the transport's own
        # thread handling (which is roughly analogous to our self-controlled
        # TunnelManager for local forwarding). See if we can use more of
        # Paramiko's API (or improve it and then do so) so that isn't
        # necessary.
        tunnels = []

        def callback(channel, src_addr_tup, dst_addr_tup):
            sock = socket.socket()
            # TODO: handle connection failure such that channel, etc get closed
            sock.connect((local_host, local_port))
            # TODO: we don't actually need to generate the Events at our level,
            # do we? Just let Tunnel.__init__ do it; all we do is "press its
            # button" on shutdown...
            tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
            tunnel.start()
            # Communication between ourselves & the Paramiko handling subthread
            tunnels.append(tunnel)

        # Ask Paramiko (really, the remote sshd) to call our callback whenever
        # connections are established on the remote iface/port.
        # transport.request_port_forward(remote_host, remote_port, callback)
        try:
            self.transport.request_port_forward(
                address=remote_host, port=remote_port, handler=callback
            )
            yield
        finally:
            # TODO: see above re: lack of a TunnelManager
            # TODO: and/or also refactor with TunnelManager re: shutdown logic.
            # E.g. maybe have a non-thread TunnelManager-alike with a method
            # that acts as the callback? At least then there's a tiny bit more
            # encapsulation...meh.
            for tunnel in tunnels:
                tunnel.finished.set()
                tunnel.join()
            self.transport.cancel_port_forward(
                address=remote_host, port=remote_port
            )
|
fabric/fabric | fabric/connection.py | Connection.sudo | python | def sudo(self, command, **kwargs):
runner = self.config.runners.remote(self)
return self._sudo(runner, command, **kwargs) | Execute a shell command, via ``sudo``, on the remote end.
This method is identical to `invoke.context.Context.sudo` in every way,
except in that -- like `run` -- it honors per-host/per-connection
configuration overrides in addition to the generic/global ones. Thus,
for example, per-host sudo passwords may be configured.
.. versionadded:: 2.0 | train | https://github.com/fabric/fabric/blob/e9939d68b734935f0c98d98817912ad7c698238f/fabric/connection.py#L612-L624 | null | class Connection(Context):
"""
A connection to an SSH daemon, with methods for commands and file transfer.
**Basics**
This class inherits from Invoke's `~invoke.context.Context`, as it is a
context within which commands, tasks etc can operate. It also encapsulates
a Paramiko `~paramiko.client.SSHClient` instance, performing useful high
level operations with that `~paramiko.client.SSHClient` and
`~paramiko.channel.Channel` instances generated from it.
.. _connect_kwargs:
.. note::
Many SSH specific options -- such as specifying private keys and
passphrases, timeouts, disabling SSH agents, etc -- are handled
directly by Paramiko and should be specified via the
:ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor.
**Lifecycle**
`.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do
work <run>`, `disconnect/close <close>`" lifecycle:
* `Instantiation <__init__>` imprints the object with its connection
parameters (but does **not** actually initiate the network connection).
* Methods like `run`, `get` etc automatically trigger a call to
`open` if the connection is not active; users may of course call `open`
manually if desired.
* Connections do not always need to be explicitly closed; much of the
time, Paramiko's garbage collection hooks or Python's own shutdown
sequence will take care of things. **However**, should you encounter edge
cases (for example, sessions hanging on exit) it's helpful to explicitly
close connections when you're done with them.
This can be accomplished by manually calling `close`, or by using the
object as a contextmanager::
with Connection('host') as c:
c.run('command')
c.put('file')
.. note::
This class rebinds `invoke.context.Context.run` to `.local` so both
remote and local command execution can coexist.
**Configuration**
Most `.Connection` parameters honor :doc:`Invoke-style configuration
</concepts/configuration>` as well as any applicable :ref:`SSH config file
directives <connection-ssh-config>`. For example, to end up with a
connection to ``admin@myhost``, one could:
- Use any built-in config mechanism, such as ``/etc/fabric.yml``,
``~/.fabric.json``, collection-driven configuration, env vars, etc,
stating ``user: admin`` (or ``{"user": "admin"}``, depending on config
format.) Then ``Connection('myhost')`` would implicitly have a ``user``
of ``admin``.
- Use an SSH config file containing ``User admin`` within any applicable
``Host`` header (``Host myhost``, ``Host *``, etc.) Again,
``Connection('myhost')`` will default to an ``admin`` user.
- Leverage host-parameter shorthand (described in `.Config.__init__`), i.e.
``Connection('admin@myhost')``.
- Give the parameter directly: ``Connection('myhost', user='admin')``.
The same applies to agent forwarding, gateways, and so forth.
.. versionadded:: 2.0
"""
# NOTE: these are initialized here to hint to invoke.Config.__setattr__
# that they should be treated as real attributes instead of config proxies.
# (Additionally, we're doing this instead of using invoke.Config._set() so
# we can take advantage of Sphinx's attribute-doc-comment static analysis.)
# Once an instance is created, these values will usually be non-None
# because they default to the default config values.
host = None
original_host = None
user = None
port = None
ssh_config = None
gateway = None
forward_agent = None
connect_timeout = None
connect_kwargs = None
client = None
transport = None
_sftp = None
_agent_handler = None
# TODO: should "reopening" an existing Connection object that has been
# closed, be allowed? (See e.g. how v1 detects closed/semi-closed
# connections & nukes them before creating a new client to the same host.)
# TODO: push some of this into paramiko.client.Client? e.g. expand what
# Client.exec_command does, it already allows configuring a subset of what
# we do / will eventually do / did in 1.x. It's silly to have to do
# .get_transport().open_session().
def __init__(
self,
host,
user=None,
port=None,
config=None,
gateway=None,
forward_agent=None,
connect_timeout=None,
connect_kwargs=None,
):
"""
Set up a new object representing a server connection.
:param str host:
the hostname (or IP address) of this connection.
May include shorthand for the ``user`` and/or ``port`` parameters,
of the form ``user@host``, ``host:port``, or ``user@host:port``.
.. note::
Due to ambiguity, IPv6 host addresses are incompatible with the
``host:port`` shorthand (though ``user@host`` will still work
OK). In other words, the presence of >1 ``:`` character will
prevent any attempt to derive a shorthand port number; use the
explicit ``port`` parameter instead.
.. note::
If ``host`` matches a ``Host`` clause in loaded SSH config
data, and that ``Host`` clause contains a ``Hostname``
directive, the resulting `.Connection` object will behave as if
``host`` is equal to that ``Hostname`` value.
In all cases, the original value of ``host`` is preserved as
the ``original_host`` attribute.
Thus, given SSH config like so::
Host myalias
Hostname realhostname
a call like ``Connection(host='myalias')`` will result in an
object whose ``host`` attribute is ``realhostname``, and whose
``original_host`` attribute is ``myalias``.
:param str user:
the login user for the remote connection. Defaults to
``config.user``.
:param int port:
the remote port. Defaults to ``config.port``.
:param config:
configuration settings to use when executing methods on this
`.Connection` (e.g. default SSH port and so forth).
Should be a `.Config` or an `invoke.config.Config`
(which will be turned into a `.Config`).
Default is an anonymous `.Config` object.
:param gateway:
An object to use as a proxy or gateway for this connection.
This parameter accepts one of the following:
- another `.Connection` (for a ``ProxyJump`` style gateway);
        - a shell command string (for a ``ProxyCommand`` style
        gateway).
Default: ``None``, meaning no gatewaying will occur (unless
otherwise configured; if one wants to override a configured gateway
at runtime, specify ``gateway=False``.)
.. seealso:: :ref:`ssh-gateways`
:param bool forward_agent:
Whether to enable SSH agent forwarding.
Default: ``config.forward_agent``.
:param int connect_timeout:
Connection timeout, in seconds.
Default: ``config.timeouts.connect``.
.. _connect_kwargs-arg:
:param dict connect_kwargs:
Keyword arguments handed verbatim to
`SSHClient.connect <paramiko.client.SSHClient.connect>` (when
`.open` is called).
`.Connection` tries not to grow additional settings/kwargs of its
own unless it is adding value of some kind; thus,
``connect_kwargs`` is currently the right place to hand in paramiko
connection parameters such as ``pkey`` or ``key_filename``. For
example::
c = Connection(
host="hostname",
user="admin",
connect_kwargs={
"key_filename": "/home/myuser/.ssh/private.key",
},
)
Default: ``config.connect_kwargs``.
:raises ValueError:
if user or port values are given via both ``host`` shorthand *and*
their own arguments. (We `refuse the temptation to guess`_).
.. _refuse the temptation to guess:
http://zen-of-python.info/
in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12
"""
# NOTE: parent __init__ sets self._config; for now we simply overwrite
# that below. If it's somehow problematic we would want to break parent
# __init__ up in a manner that is more cleanly overrideable.
super(Connection, self).__init__(config=config)
#: The .Config object referenced when handling default values (for e.g.
#: user or port, when not explicitly given) or deciding how to behave.
if config is None:
config = Config()
# Handle 'vanilla' Invoke config objects, which need cloning 'into' one
# of our own Configs (which grants the new defaults, etc, while not
# squashing them if the Invoke-level config already accounted for them)
elif not isinstance(config, Config):
config = config.clone(into=Config)
self._set(_config=config)
# TODO: when/how to run load_files, merge, load_shell_env, etc?
# TODO: i.e. what is the lib use case here (and honestly in invoke too)
shorthand = self.derive_shorthand(host)
host = shorthand["host"]
err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa
if shorthand["user"] is not None:
if user is not None:
raise ValueError(err.format("user"))
user = shorthand["user"]
if shorthand["port"] is not None:
if port is not None:
raise ValueError(err.format("port"))
port = shorthand["port"]
# NOTE: we load SSH config data as early as possible as it has
# potential to affect nearly every other attribute.
#: The per-host SSH config data, if any. (See :ref:`ssh-config`.)
self.ssh_config = self.config.base_ssh_config.lookup(host)
self.original_host = host
#: The hostname of the target server.
self.host = host
if "hostname" in self.ssh_config:
# TODO: log that this occurred?
self.host = self.ssh_config["hostname"]
#: The username this connection will use to connect to the remote end.
self.user = user or self.ssh_config.get("user", self.config.user)
# TODO: is it _ever_ possible to give an empty user value (e.g.
# user='')? E.g. do some SSH server specs allow for that?
#: The network port to connect on.
self.port = port or int(self.ssh_config.get("port", self.config.port))
# Gateway/proxy/bastion/jump setting: non-None values - string,
# Connection, even eg False - get set directly; None triggers seek in
# config/ssh_config
#: The gateway `.Connection` or ``ProxyCommand`` string to be used,
#: if any.
self.gateway = gateway if gateway is not None else self.get_gateway()
# NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up
# the ProxyCommand subprocess at init time, vs open() time.
# TODO: make paramiko.proxy.ProxyCommand lazy instead?
if forward_agent is None:
# Default to config...
forward_agent = self.config.forward_agent
# But if ssh_config is present, it wins
if "forwardagent" in self.ssh_config:
# TODO: SSHConfig really, seriously needs some love here, god
map_ = {"yes": True, "no": False}
forward_agent = map_[self.ssh_config["forwardagent"]]
#: Whether agent forwarding is enabled.
self.forward_agent = forward_agent
if connect_timeout is None:
connect_timeout = self.ssh_config.get(
"connecttimeout", self.config.timeouts.connect
)
if connect_timeout is not None:
connect_timeout = int(connect_timeout)
#: Connection timeout
self.connect_timeout = connect_timeout
#: Keyword arguments given to `paramiko.client.SSHClient.connect` when
#: `open` is called.
self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs)
#: The `paramiko.client.SSHClient` instance this connection wraps.
client = SSHClient()
client.set_missing_host_key_policy(AutoAddPolicy())
self.client = client
#: A convenience handle onto the return value of
#: ``self.client.get_transport()``.
self.transport = None
def resolve_connect_kwargs(self, connect_kwargs):
# Grab connect_kwargs from config if not explicitly given.
if connect_kwargs is None:
# TODO: is it better to pre-empt conflicts w/ manually-handled
# connect() kwargs (hostname, username, etc) here or in open()?
# We're doing open() for now in case e.g. someone manually modifies
# .connect_kwargs attributewise, but otherwise it feels better to
# do it early instead of late.
connect_kwargs = self.config.connect_kwargs
# Special case: key_filename gets merged instead of overridden.
# TODO: probably want some sorta smart merging generally, special cases
# are bad.
elif "key_filename" in self.config.connect_kwargs:
kwarg_val = connect_kwargs.get("key_filename", [])
conf_val = self.config.connect_kwargs["key_filename"]
# Config value comes before kwarg value (because it may contain
# CLI flag value.)
connect_kwargs["key_filename"] = conf_val + kwarg_val
# SSH config identityfile values come last in the key_filename
# 'hierarchy'.
if "identityfile" in self.ssh_config:
connect_kwargs.setdefault("key_filename", [])
connect_kwargs["key_filename"].extend(
self.ssh_config["identityfile"]
)
return connect_kwargs
    def get_gateway(self):
        # Resolve this connection's gateway: SSH config directives
        # (ProxyJump, then ProxyCommand) win over Invoke-style config.
        if "proxyjump" in self.ssh_config:
            # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
            # with the final (itself non-gatewayed) hop and work up to
            # the front (actual, supplied as our own gateway) hop
            hops = reversed(self.ssh_config["proxyjump"].split(","))
            prev_gw = None
            for hop in hops:
                # Short-circuit if we appear to be our own proxy, which would
                # be a RecursionError. Implies SSH config wildcards.
                # TODO: in an ideal world we'd check user/port too in case they
                # differ, but...seriously? They can file a PR with those extra
                # half dozen test cases in play, E_NOTIME
                if self.derive_shorthand(hop)["host"] == self.host:
                    return None
                # Happily, ProxyJump uses identical format to our host
                # shorthand...
                kwargs = dict(config=self.config.clone())
                if prev_gw is not None:
                    kwargs["gateway"] = prev_gw
                cxn = Connection(hop, **kwargs)
                prev_gw = cxn
            return prev_gw
        elif "proxycommand" in self.ssh_config:
            # Just a string, which we interpret as a proxy command.
            return self.ssh_config["proxycommand"]
        # Fallback: config value (may be None).
        return self.config.gateway
def __repr__(self):
# Host comes first as it's the most common differentiator by far
bits = [("host", self.host)]
# TODO: maybe always show user regardless? Explicit is good...
if self.user != self.config.user:
bits.append(("user", self.user))
# TODO: harder to make case for 'always show port'; maybe if it's
# non-22 (even if config has overridden the local default)?
if self.port != self.config.port:
bits.append(("port", self.port))
# NOTE: sometimes self.gateway may be eg False if someone wants to
# explicitly override a configured non-None value (as otherwise it's
# impossible for __init__ to tell if a None means "nothing given" or
# "seriously please no gatewaying". So, this must always be a vanilla
# truth test and not eg "is not None".
if self.gateway:
# Displaying type because gw params would probs be too verbose
val = "proxyjump"
if isinstance(self.gateway, string_types):
val = "proxycommand"
bits.append(("gw", val))
return "<Connection {}>".format(
" ".join("{}={}".format(*x) for x in bits)
)
def _identity(self):
# TODO: consider including gateway and maybe even other init kwargs?
# Whether two cxns w/ same user/host/port but different
# gateway/keys/etc, should be considered "the same", is unclear.
return (self.host, self.user, self.port)
def __eq__(self, other):
if not isinstance(other, Connection):
return False
return self._identity() == other._identity()
def __lt__(self, other):
return self._identity() < other._identity()
def __hash__(self):
# NOTE: this departs from Context/DataProxy, which is not usefully
# hashable.
return hash(self._identity())
    def derive_shorthand(self, host_string):
        # Parse "[user@]host[:port]" shorthand into a dict with 'user',
        # 'host' and 'port' keys; missing components come back as None.
        user_hostport = host_string.rsplit("@", 1)
        hostport = user_hostport.pop()
        user = user_hostport[0] if user_hostport and user_hostport[0] else None
        # IPv6: can't reliably tell where addr ends and port begins, so don't
        # try (and don't bother adding special syntax either, user should avoid
        # this situation by using port=).
        if hostport.count(":") > 1:
            host = hostport
            port = None
        # IPv4: can split on ':' reliably.
        else:
            host_port = hostport.rsplit(":", 1)
            host = host_port.pop(0) or None
            port = host_port[0] if host_port and host_port[0] else None
        # Normalize a textual port into an int for callers.
        if port is not None:
            port = int(port)
        return {"user": user, "host": host, "port": port}
@property
def is_connected(self):
"""
Whether or not this connection is actually open.
.. versionadded:: 2.0
"""
return self.transport.active if self.transport else False
def open(self):
"""
Initiate an SSH connection to the host/port this object is bound to.
This may include activating the configured gateway connection, if one
is set.
Also saves a handle to the now-set Transport object for easier access.
Various connect-time settings (and/or their corresponding :ref:`SSH
config options <ssh-config>`) are utilized here in the call to
`SSHClient.connect <paramiko.client.SSHClient.connect>`. (For details,
see :doc:`the configuration docs </concepts/configuration>`.)
.. versionadded:: 2.0
"""
# Short-circuit
if self.is_connected:
return
err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!" # noqa
# These may not be given, period
for key in """
hostname
port
username
""".split():
if key in self.connect_kwargs:
raise ValueError(err.format(key))
# These may be given one way or the other, but not both
if (
"timeout" in self.connect_kwargs
and self.connect_timeout is not None
):
raise ValueError(err.format("timeout"))
# No conflicts -> merge 'em together
kwargs = dict(
self.connect_kwargs,
username=self.user,
hostname=self.host,
port=self.port,
)
if self.gateway:
kwargs["sock"] = self.open_gateway()
if self.connect_timeout:
kwargs["timeout"] = self.connect_timeout
# Strip out empty defaults for less noisy debugging
if "key_filename" in kwargs and not kwargs["key_filename"]:
del kwargs["key_filename"]
# Actually connect!
self.client.connect(**kwargs)
self.transport = self.client.get_transport()
def open_gateway(self):
"""
Obtain a socket-like object from `gateway`.
:returns:
A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
`.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
was a string.
.. versionadded:: 2.0
"""
# ProxyCommand is faster to set up, so do it first.
if isinstance(self.gateway, string_types):
# Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
# TODO: use real SSH config once loading one properly is
# implemented.
ssh_conf = SSHConfig()
dummy = "Host {}\n ProxyCommand {}"
ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
# Handle inner-Connection gateway type here.
# TODO: logging
self.gateway.open()
# TODO: expose the opened channel itself as an attribute? (another
# possible argument for separating the two gateway types...) e.g. if
# someone wanted to piggyback on it for other same-interpreter socket
# needs...
# TODO: and the inverse? allow users to supply their own socket/like
# object they got via $WHEREEVER?
# TODO: how best to expose timeout param? reuse general connection
# timeout from config?
return self.gateway.transport.open_channel(
kind="direct-tcpip",
dest_addr=(self.host, int(self.port)),
# NOTE: src_addr needs to be 'empty but not None' values to
# correctly encode into a network message. Theoretically Paramiko
# could auto-interpret None sometime & save us the trouble.
src_addr=("", 0),
)
def close(self):
    """
    Terminate the network connection to the remote end, if open.

    If no connection is open, this method does nothing.

    .. versionadded:: 2.0
    """
    # Only tear down the underlying client when a live session exists.
    if self.is_connected:
        self.client.close()
    # If agent forwarding was requested and actually wired up, release it.
    agent = self._agent_handler
    if self.forward_agent and agent is not None:
        agent.close()
def __enter__(self):
    """Context-manager entry point: return this connection unchanged."""
    return self
def __exit__(self, *exc):
    """Context-manager exit: always close; never suppresses exceptions."""
    self.close()
@opens
def create_session(self):
    """
    Open and return a new session channel on the underlying transport.

    When agent forwarding is enabled, an `AgentRequestHandler` is attached
    to the freshly opened channel before it is returned.
    """
    chan = self.transport.open_session()
    if self.forward_agent:
        self._agent_handler = AgentRequestHandler(chan)
    return chan
@opens
def run(self, command, **kwargs):
    """
    Execute a shell command on the remote end of this connection.

    This method wraps an SSH-capable implementation of
    `invoke.runners.Runner.run`; see its documentation for details.

    .. warning::
        There are a few spots where Fabric departs from Invoke's default
        settings/behaviors; they are documented under
        `.Config.global_defaults`.

    .. versionadded:: 2.0
    """
    # Build a remote-capable runner from config and hand off to the shared
    # _run() machinery for actual execution.
    return self._run(self.config.runners.remote(self), command, **kwargs)
@opens
def local(self, *args, **kwargs):
    """
    Execute a shell command on the local system.

    This method is effectively a wrapper of `invoke.run`; see its docs for
    details and call signature.

    .. versionadded:: 2.0
    """
    # Superclass run() uses runners.local, so we can literally just call it
    # straight (unlike self.run(), which swaps in a remote runner).
    return super(Connection, self).run(*args, **kwargs)
@opens
def sftp(self):
    """
    Return a `~paramiko.sftp_client.SFTPClient` object.

    If called more than one time, memoizes the first result; thus, any
    given `.Connection` instance will only ever have a single SFTP client,
    and state (such as that managed by
    `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

    .. versionadded:: 2.0
    """
    # Lazily create the client on first access, then reuse it forever.
    client = self._sftp
    if client is None:
        client = self.client.open_sftp()
        self._sftp = client
    return client
def get(self, *args, **kwargs):
    """
    Get a remote file to the local filesystem or file-like object.

    Simply a wrapper for `.Transfer.get`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Delegate wholesale to a Transfer bound to this connection.
    transfer = Transfer(self)
    return transfer.get(*args, **kwargs)
def put(self, *args, **kwargs):
    """
    Put a local file (or file-like object) to the remote filesystem.

    Simply a wrapper for `.Transfer.put`. Please see its documentation for
    all details.

    .. versionadded:: 2.0
    """
    # Fix: previous docstring said "Put a remote file", but this uploads a
    # local file/file-like object to the remote end (it wraps Transfer.put).
    return Transfer(self).put(*args, **kwargs)
# TODO: yield the socket for advanced users? Other advanced use cases
# (perhaps factor out socket creation itself)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_local(
    self,
    local_port,
    remote_port=None,
    remote_host="localhost",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``local_port`` to the server's environment.

    For example, say you want to connect to a remote PostgreSQL database
    which is locked down and only accessible via the system it's running
    on. You have SSH access to this server, so you can temporarily make
    port 5432 on your local system act like port 5432 on the server::

        import psycopg2
        from fabric import Connection

        with Connection('my-db-server').forward_local(5432):
            db = psycopg2.connect(
                host='localhost', port=5432, database='mydb'
            )
            # Do things with 'db' here

    This method is analogous to using the ``-L`` option of OpenSSH's
    ``ssh`` program.

    :param int local_port: The local port number on which to listen.
    :param int remote_port:
        The remote port number. Defaults to the same value as
        ``local_port``.
    :param str local_host:
        The local hostname/interface on which to listen. Default:
        ``localhost``.
    :param str remote_host:
        The remote hostname serving the forwarded remote port. Default:
        ``localhost`` (i.e., the host this `.Connection` is connected to.)
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    remote_port = remote_port or local_port
    # A background TunnelManager owns the listening socket and spawns a
    # worker thread per inbound connection; this Event tells it to stop.
    stop_signal = Event()
    manager = TunnelManager(
        local_port=local_port,
        local_host=local_host,
        remote_port=remote_port,
        remote_host=remote_host,
        # TODO: not a huge fan of handing in our transport, but...?
        transport=self.transport,
        finished=stop_signal,
    )
    manager.start()
    try:
        # Hand control back to the caller while the tunnel operates.
        yield
    finally:
        # Signal the manager to close all open tunnels, then wait for it.
        stop_signal.set()
        manager.join()
        # Surface any error raised inside the manager thread; it is one of:
        # - an inner ThreadException created by the manager on behalf of
        #   its Tunnels, which we re-raise directly;
        # - anything else, which occurred in the manager itself and gets
        #   wrapped in a fresh ThreadException.
        # NOTE: some of the threading metadata tracking here is
        # superfluous for this single-manager case; ignored for now.
        problem = manager.exception()
        if problem is not None:
            if problem.type is ThreadException:
                raise problem.value
            raise ThreadException([problem])
        # TODO: cancel port forward on transport? Does that even make
        # sense here (where we used direct-tcpip) vs forward_remote
        # (which is what uses forward-tcpip)?
# TODO: probably push some of this down into Paramiko
@contextmanager
@opens
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``remote_port`` to the local environment.

    For example, say you're running a daemon in development mode on your
    workstation at port 8080, and want to funnel traffic to it from a
    production or staging environment.

    In most situations this isn't possible as your office/home network
    probably blocks inbound traffic. But you have SSH access to this
    server, so you can temporarily make port 8080 on that server act like
    port 8080 on your workstation::

        from fabric import Connection

        c = Connection('my-remote-server')
        with c.forward_remote(8080):
            c.run("remote-data-writer --port 8080")
            # Assuming remote-data-writer runs until interrupted, this
            # will stay open until you Ctrl-C...

    This method is analogous to using the ``-R`` option of OpenSSH's
    ``ssh`` program.

    :param int remote_port: The remote port number on which to listen.
    :param int local_port:
        The local port number. Defaults to the same value as
        ``remote_port``.
    :param str local_host:
        The local hostname/interface the forwarded connection talks to.
        Default: ``localhost``.
    :param str remote_host:
        The remote interface address to listen on when forwarding
        connections. Default: ``127.0.0.1`` (i.e. only listen on the
        remote localhost).
    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    local_port = local_port or remote_port
    # Shared between us and the Paramiko-managed handler thread: one
    # Tunnel per accepted remote connection, so we can close them all
    # during shutdown.
    # TODO: less than ideal to share state with the transport's own
    # thread handling; see if more of Paramiko's API can absorb this.
    active = []

    def accept(channel, src_addr_tup, dst_addr_tup):
        # Invoked (by the remote sshd, via Paramiko) for each connection
        # established on the remote port; `channel` is hooked up to that
        # port. We ignore the addr tuples — only the channel matters.
        # Pair the channel with a fresh outbound socket to the forwarded
        # local endpoint, shuttled by a Tunnel thread.
        outbound = socket.socket()
        # TODO: handle connection failure such that channel, etc get
        # closed.
        outbound.connect((local_host, local_port))
        # TODO: could let Tunnel.__init__ own the Event; we only ever
        # "press its button" on shutdown.
        worker = Tunnel(channel=channel, sock=outbound, finished=Event())
        worker.start()
        active.append(worker)

    try:
        # Ask the remote sshd to invoke our handler for every connection
        # established on remote_host:remote_port.
        self.transport.request_port_forward(
            address=remote_host, port=remote_port, handler=accept
        )
        yield
    finally:
        # TODO: refactor shutdown logic to share with TunnelManager?
        for worker in active:
            worker.finished.set()
            worker.join()
        self.transport.cancel_port_forward(
            address=remote_host, port=remote_port
        )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.