repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
CLARIAH/grlc | src/prov.py | grlcPROV.add_used_entity | python | def add_used_entity(self, entity_uri):
entity_o = URIRef(entity_uri)
self.prov_g.add((entity_o, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, self.prov.used, entity_o)) | Add the provided URI as a used entity by the logged activity | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/prov.py#L72-L78 | null | class grlcPROV():
def __init__(self, user, repo):
"""
Default constructor
"""
self.user = user
self.repo = repo
self.prov_g = Graph()
prov_uri = URIRef("http://www.w3.org/ns/prov#")
self.prov = Namespace(prov_uri)
self.prov_g.bind('prov', self.prov)
self.agent = URIRef("http://{}".format(static.SERVER_NAME))
self.entity_d = URIRef("http://{}/api/{}/{}/spec".format(static.SERVER_NAME, self.user, self.repo))
self.activity = URIRef(self.entity_d + "-activity")
self.init_prov_graph()
def init_prov_graph(self):
"""
Initialize PROV graph with all we know at the start of the recording
"""
try:
# Use git2prov to get prov on the repo
repo_prov = check_output(
['node_modules/git2prov/bin/git2prov', 'https://github.com/{}/{}/'.format(self.user, self.repo),
'PROV-O']).decode("utf-8")
repo_prov = repo_prov[repo_prov.find('@'):]
# glogger.debug('Git2PROV output: {}'.format(repo_prov))
glogger.debug('Ingesting Git2PROV output into RDF graph')
with open('temp.prov.ttl', 'w') as temp_prov:
temp_prov.write(repo_prov)
self.prov_g.parse('temp.prov.ttl', format='turtle')
except Exception as e:
glogger.error(e)
glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
pass
self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
# entity_d
self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
# later: entity_d genereated at time (when we know the end time)
# activity
self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))
# later: activity used entity_o_1 ... entity_o_n
# later: activity endedAtTime (when we know the end time)
def end_prov_graph(self):
"""
Finalize prov recording with end time
"""
endTime = Literal(datetime.now())
self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))
self.prov_g.add((self.activity, self.prov.endedAtTime, endTime))
def log_prov_graph(self):
"""
Log provenance graph so far
"""
glogger.debug("Spec generation provenance graph:")
glogger.debug(self.prov_g.serialize(format='turtle'))
def serialize(self, format):
"""
Serialize provenance graph in the specified format
"""
if PY3:
return self.prov_g.serialize(format=format).decode('utf-8')
else:
return self.prov_g.serialize(format=format)
|
CLARIAH/grlc | src/prov.py | grlcPROV.end_prov_graph | python | def end_prov_graph(self):
endTime = Literal(datetime.now())
self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))
self.prov_g.add((self.activity, self.prov.endedAtTime, endTime)) | Finalize prov recording with end time | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/prov.py#L80-L86 | null | class grlcPROV():
def __init__(self, user, repo):
"""
Default constructor
"""
self.user = user
self.repo = repo
self.prov_g = Graph()
prov_uri = URIRef("http://www.w3.org/ns/prov#")
self.prov = Namespace(prov_uri)
self.prov_g.bind('prov', self.prov)
self.agent = URIRef("http://{}".format(static.SERVER_NAME))
self.entity_d = URIRef("http://{}/api/{}/{}/spec".format(static.SERVER_NAME, self.user, self.repo))
self.activity = URIRef(self.entity_d + "-activity")
self.init_prov_graph()
def init_prov_graph(self):
"""
Initialize PROV graph with all we know at the start of the recording
"""
try:
# Use git2prov to get prov on the repo
repo_prov = check_output(
['node_modules/git2prov/bin/git2prov', 'https://github.com/{}/{}/'.format(self.user, self.repo),
'PROV-O']).decode("utf-8")
repo_prov = repo_prov[repo_prov.find('@'):]
# glogger.debug('Git2PROV output: {}'.format(repo_prov))
glogger.debug('Ingesting Git2PROV output into RDF graph')
with open('temp.prov.ttl', 'w') as temp_prov:
temp_prov.write(repo_prov)
self.prov_g.parse('temp.prov.ttl', format='turtle')
except Exception as e:
glogger.error(e)
glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
pass
self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
# entity_d
self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
# later: entity_d genereated at time (when we know the end time)
# activity
self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))
# later: activity used entity_o_1 ... entity_o_n
# later: activity endedAtTime (when we know the end time)
def add_used_entity(self, entity_uri):
"""
Add the provided URI as a used entity by the logged activity
"""
entity_o = URIRef(entity_uri)
self.prov_g.add((entity_o, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, self.prov.used, entity_o))
def log_prov_graph(self):
"""
Log provenance graph so far
"""
glogger.debug("Spec generation provenance graph:")
glogger.debug(self.prov_g.serialize(format='turtle'))
def serialize(self, format):
"""
Serialize provenance graph in the specified format
"""
if PY3:
return self.prov_g.serialize(format=format).decode('utf-8')
else:
return self.prov_g.serialize(format=format)
|
CLARIAH/grlc | src/prov.py | grlcPROV.log_prov_graph | python | def log_prov_graph(self):
glogger.debug("Spec generation provenance graph:")
glogger.debug(self.prov_g.serialize(format='turtle')) | Log provenance graph so far | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/prov.py#L88-L93 | null | class grlcPROV():
def __init__(self, user, repo):
"""
Default constructor
"""
self.user = user
self.repo = repo
self.prov_g = Graph()
prov_uri = URIRef("http://www.w3.org/ns/prov#")
self.prov = Namespace(prov_uri)
self.prov_g.bind('prov', self.prov)
self.agent = URIRef("http://{}".format(static.SERVER_NAME))
self.entity_d = URIRef("http://{}/api/{}/{}/spec".format(static.SERVER_NAME, self.user, self.repo))
self.activity = URIRef(self.entity_d + "-activity")
self.init_prov_graph()
def init_prov_graph(self):
"""
Initialize PROV graph with all we know at the start of the recording
"""
try:
# Use git2prov to get prov on the repo
repo_prov = check_output(
['node_modules/git2prov/bin/git2prov', 'https://github.com/{}/{}/'.format(self.user, self.repo),
'PROV-O']).decode("utf-8")
repo_prov = repo_prov[repo_prov.find('@'):]
# glogger.debug('Git2PROV output: {}'.format(repo_prov))
glogger.debug('Ingesting Git2PROV output into RDF graph')
with open('temp.prov.ttl', 'w') as temp_prov:
temp_prov.write(repo_prov)
self.prov_g.parse('temp.prov.ttl', format='turtle')
except Exception as e:
glogger.error(e)
glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
pass
self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
# entity_d
self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
# later: entity_d genereated at time (when we know the end time)
# activity
self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))
# later: activity used entity_o_1 ... entity_o_n
# later: activity endedAtTime (when we know the end time)
def add_used_entity(self, entity_uri):
"""
Add the provided URI as a used entity by the logged activity
"""
entity_o = URIRef(entity_uri)
self.prov_g.add((entity_o, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, self.prov.used, entity_o))
def end_prov_graph(self):
"""
Finalize prov recording with end time
"""
endTime = Literal(datetime.now())
self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))
self.prov_g.add((self.activity, self.prov.endedAtTime, endTime))
def serialize(self, format):
"""
Serialize provenance graph in the specified format
"""
if PY3:
return self.prov_g.serialize(format=format).decode('utf-8')
else:
return self.prov_g.serialize(format=format)
|
CLARIAH/grlc | src/prov.py | grlcPROV.serialize | python | def serialize(self, format):
if PY3:
return self.prov_g.serialize(format=format).decode('utf-8')
else:
return self.prov_g.serialize(format=format) | Serialize provenance graph in the specified format | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/prov.py#L95-L102 | null | class grlcPROV():
def __init__(self, user, repo):
"""
Default constructor
"""
self.user = user
self.repo = repo
self.prov_g = Graph()
prov_uri = URIRef("http://www.w3.org/ns/prov#")
self.prov = Namespace(prov_uri)
self.prov_g.bind('prov', self.prov)
self.agent = URIRef("http://{}".format(static.SERVER_NAME))
self.entity_d = URIRef("http://{}/api/{}/{}/spec".format(static.SERVER_NAME, self.user, self.repo))
self.activity = URIRef(self.entity_d + "-activity")
self.init_prov_graph()
def init_prov_graph(self):
"""
Initialize PROV graph with all we know at the start of the recording
"""
try:
# Use git2prov to get prov on the repo
repo_prov = check_output(
['node_modules/git2prov/bin/git2prov', 'https://github.com/{}/{}/'.format(self.user, self.repo),
'PROV-O']).decode("utf-8")
repo_prov = repo_prov[repo_prov.find('@'):]
# glogger.debug('Git2PROV output: {}'.format(repo_prov))
glogger.debug('Ingesting Git2PROV output into RDF graph')
with open('temp.prov.ttl', 'w') as temp_prov:
temp_prov.write(repo_prov)
self.prov_g.parse('temp.prov.ttl', format='turtle')
except Exception as e:
glogger.error(e)
glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
pass
self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
# entity_d
self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
# later: entity_d genereated at time (when we know the end time)
# activity
self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))
# later: activity used entity_o_1 ... entity_o_n
# later: activity endedAtTime (when we know the end time)
def add_used_entity(self, entity_uri):
"""
Add the provided URI as a used entity by the logged activity
"""
entity_o = URIRef(entity_uri)
self.prov_g.add((entity_o, RDF.type, self.prov.Entity))
self.prov_g.add((self.activity, self.prov.used, entity_o))
def end_prov_graph(self):
"""
Finalize prov recording with end time
"""
endTime = Literal(datetime.now())
self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))
self.prov_g.add((self.activity, self.prov.endedAtTime, endTime))
def log_prov_graph(self):
"""
Log provenance graph so far
"""
glogger.debug("Spec generation provenance graph:")
glogger.debug(self.prov_g.serialize(format='turtle'))
|
CLARIAH/grlc | src/cache.py | init_cache | python | def init_cache():
'''
Initializes the grlc cache (json file)
'''
cache_obj = json.loads("{}")
try:
with open(CACHE_NAME, 'r') as cache_file:
try:
cache_obj = json.load(cache_file)
except ValueError:
print("The cache file seems to be empty, starting with flushed cache")
except IOError:
print("The cache file seems to be empty, starting with flushed cache")
print("Loaded JSON cache")
return cache_obj | Initializes the grlc cache (json file) | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/cache.py#L12-L28 | null | #!/usr/bin/env python
# cache.py: grlc spec caching utilities
import json
import urllib.request, urllib.error, urllib.parse
import logging
# Name of the cache json file
CACHE_NAME = "db-cache.json"
glogger = logging.getLogger(__name__)
def is_cache_updated(cache_obj, repo_uri):
if repo_uri not in cache_obj:
return False
cache_date = cache_obj[repo_uri]['date']
stream = urllib.request.urlopen(repo_uri)
resp = json.load(stream)
github_date = resp['pushed_at']
return cache_date > github_date
|
CLARIAH/grlc | src/gquery.py | guess_endpoint_uri | python | def guess_endpoint_uri(rq, gh_repo):
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
if has_request_context() and "endpoint" in request.args:
endpoint = request.args['endpoint']
glogger.info("Endpoint provided in request: " + endpoint)
return endpoint, auth
# Decorator
try:
decorators = get_yaml_decorators(rq)
endpoint = decorators['endpoint']
auth = None
glogger.info("Decorator guessed endpoint: " + endpoint)
except (TypeError, KeyError):
# File
try:
endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
endpoint = endpoint_content.strip().splitlines()[0]
auth = None
glogger.info("File guessed endpoint: " + endpoint)
# TODO: except all is really ugly
except:
# Default
endpoint = static.DEFAULT_ENDPOINT
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
glogger.warning("No endpoint specified, using default ({})".format(endpoint))
return endpoint, auth | Guesses the endpoint URI from (in this order):
- An endpoint parameter in URL
- An #+endpoint decorator
- A endpoint.txt file in the repo
Otherwise assigns a default one | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L26-L65 | [
"def get_yaml_decorators(rq):\n \"\"\"\n Returns the yaml decorator metadata only (this is needed by triple pattern fragments)\n \"\"\"\n # glogger.debug('Guessing decorators for query {}'.format(rq))\n if not rq:\n return None\n\n if isinstance(rq, dict) and 'grlc' in rq: # json query (sp... | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def count_query_results(query, endpoint):
"""
Returns the total number of results that query 'query' will generate
WARNING: This is too expensive just for providing a number of result pages
Providing a dummy count for now
"""
# number_results_query, repl = re.subn("SELECT.*FROM", "SELECT COUNT (*) FROM", query)
# if not repl:
# number_results_query = re.sub("SELECT.*{", "SELECT COUNT(*) {", query)
# number_results_query = re.sub("GROUP\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("ORDER\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("LIMIT\s+[0-9]+", "", number_results_query)
# number_results_query = re.sub("OFFSET\s+[0-9]+", "", number_results_query)
#
# glogger.debug("Query for result count: " + number_results_query)
#
# # Preapre HTTP request
# headers = { 'Accept' : 'application/json' }
# data = { 'query' : number_results_query }
# count_json = requests.get(endpoint, params=data, headers=headers).json()
# count = int(count_json['results']['bindings'][0]['callret-0']['value'])
# glogger.info("Paginated query has {} results in total".format(count))
#
# return count
return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile("__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters
def get_defaults(rq, v, metadata):
"""
Returns the default value for a parameter or None
"""
glogger.debug("Metadata with defaults: {}".format(metadata))
if 'defaults' not in metadata:
return None
defaultsDict = _getDictWithKey(v, metadata['defaults'])
if defaultsDict:
return defaultsDict[v]
return None
def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
"""
Returns a list of enumerated values for variable 'v' in query 'rq'
"""
# glogger.debug("Metadata before processing enums: {}".format(metadata))
# We only fire the enum filling queries if indicated by the query metadata
if 'enumerate' not in metadata:
return None
enumDict = _getDictWithKey(v, metadata['enumerate'])
if enumDict:
return enumDict[v]
if v in metadata['enumerate']:
return get_enumeration_sparql(rq, v, endpoint, auth)
return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
"""
Returns a list of enumerated values for variable 'v' in query 'rq'
"""
glogger.info('Retrieving enumeration for variable {}'.format(v))
vcodes = []
# tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
# tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)
# WHERE is optional too!!
tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
flags=re.DOTALL)
glogger.debug(rq)
tp_match = tpattern_matcher.match(rq)
if tp_match:
vtpattern = tp_match.group('tpattern')
gnames = tp_match.group('gnames')
glogger.debug("Detected graph names: {}".format(gnames))
glogger.debug("Detected BGP: {}".format(vtpattern))
glogger.debug("Matched triple pattern with parameter")
if gnames:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
else:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
glogger.debug("Codes subquery: {}".format(codes_subquery))
glogger.debug(endpoint)
codes_json = requests.get(endpoint, params={'query': codes_subquery},
headers={'Accept': static.mimetypes['json'],
'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
for code in codes_json['results']['bindings']:
vcodes.append(list(code.values())[0]["value"])
else:
glogger.debug("No match between variable name and query.")
return vcodes
def get_yaml_decorators(rq):
"""
Returns the yaml decorator metadata only (this is needed by triple pattern fragments)
"""
# glogger.debug('Guessing decorators for query {}'.format(rq))
if not rq:
return None
if isinstance(rq, dict) and 'grlc' in rq: # json query (sparql transformer)
yaml_string = rq['grlc']
query_string = rq
else: # classic query
yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])
query_metadata = None
if type(yaml_string) == dict:
query_metadata = yaml_string
elif type(yaml_string) == str:
try: # Invalid YAMLs will produce empty metadata
query_metadata = yaml.load(yaml_string)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
try:
query_metadata = json.loads(yaml_string)
except json.JSONDecodeError:
glogger.warning("Query decorators could not be parsed; check your YAML syntax")
# If there is no YAML string
if query_metadata is None:
query_metadata = {}
query_metadata['query'] = query_string
# glogger.debug("Parsed query decorators: {}".format(query_metadata))
return query_metadata
def enable_custom_function_prefix(rq, prefix):
if ' %s:' % prefix in rq or '(%s:' % prefix in rq and not 'PREFIX %s:' % prefix in rq:
rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
return rq
def get_metadata(rq, endpoint):
"""
Returns the metadata 'exp' parsed from the raw query file 'rq'
'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'
"""
query_metadata = get_yaml_decorators(rq)
query_metadata['type'] = 'UNKNOWN'
query_metadata['original_query'] = rq
if isinstance(rq, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(rq)
rq = rq.strip()
query_metadata['proto'] = proto
query_metadata['opt'] = opt
query_metadata['query'] = rq
rq = enable_custom_function_prefix(rq, 'bif')
rq = enable_custom_function_prefix(rq, 'sql')
try:
# THE PARSING
# select, describe, construct, ask
parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
query_metadata['type'] = parsed_query.algebra.name
if query_metadata['type'] == 'SelectQuery':
# Projection variables
query_metadata['variables'] = parsed_query.algebra['PV']
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
elif query_metadata['type'] == 'ConstructQuery':
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
else:
glogger.warning(
"Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
except ParseException as pe:
glogger.warning(pe)
glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")
# glogger.warning(traceback.print_exc())
# insert queries won't parse, so we regex
# glogger.info("Trying to parse INSERT query")
# if static.INSERT_PATTERN in rq:
# query_metadata['type'] = 'InsertQuery'
# query_metadata['parameters'] = [u'_g_iri']
try:
# update query
glogger.info("Trying to parse UPDATE query")
parsed_query = UpdateUnit.parseString(rq, parseAll=True)
glogger.info(parsed_query)
query_metadata['type'] = parsed_query[0]['request'][0].name
if query_metadata['type'] == 'InsertData':
query_metadata['parameters'] = {
'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
'required': True, 'type': 'iri'},
'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
'required': True, 'type': 'literal'}}
glogger.info("Update query parsed with {}".format(query_metadata['type']))
# if query_metadata['type'] == 'InsertData':
# query_metadata['variables'] = parsed_query.algebra['PV']
except:
glogger.error("Could not parse query")
glogger.error(query_metadata['query'])
glogger.error(traceback.print_exc())
pass
glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
glogger.debug("All parsed query metadata (from decorators and content): ")
glogger.debug(pformat(query_metadata, indent=32))
return query_metadata
def paginate_query(query, results_per_page, get_args):
page = get_args.get('page', 1)
glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))
# If contains LIMIT or OFFSET, remove them
glogger.debug("Original query: " + query)
no_limit_query = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
glogger.debug("No limit query: " + no_limit_query)
# Append LIMIT results_per_page OFFSET (page-1)*results_per_page
paginated_query = no_limit_query + " LIMIT {} OFFSET {}".format(results_per_page,
(int(page) - 1) * results_per_page)
glogger.debug("Paginated query: " + paginated_query)
return paginated_query
def rewrite_query(query, parameters, get_args):
glogger.debug("Query parameters")
glogger.debug(parameters)
requireXSD = False
required_params = {}
for k, v in parameters.items():
if parameters[k]['required']:
required_params[k] = v
requiredParams = set(required_params.keys())
providedParams = set(get_args.keys())
glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'
for pname, p in list(parameters.items()):
# Get the parameter value from the GET request
v = get_args.get(pname, None)
# If the parameter has a value
if not v:
continue
if isinstance(query, dict): # json query (sparql transformer)
if '$values' not in query:
query['$values'] = {}
values = query['$values']
if not p['original'] in values:
values[p['original']] = v
elif isinstance(values[p['original']], list):
values[p['original']].append(v)
else:
values[p['original']] = [values[p['original']], v]
continue
# IRI
if p['type'] == 'iri': # TODO: never reached anymore, since iris are now type=string with format=iri
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# A number (without a datatype)
elif p['type'] == 'number':
query = query.replace(p['original'], v)
# Literals
elif p['type'] == 'literal' or p['type'] == 'string':
# If it's a iri
if 'format' in p and p['format'] == 'iri':
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# If there is a language tag
if 'lang' in p and p['lang']:
query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
elif 'datatype' in p and p['datatype']:
query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
if 'xsd' in p['datatype']:
requireXSD = True
else:
query = query.replace(p['original'], "\"{}\"".format(v))
if isinstance(query, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(query)
query = rq.strip()
if requireXSD and XSD_PREFIX not in query:
query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')
glogger.debug("Query rewritten as: " + query)
return query
|
CLARIAH/grlc | src/gquery.py | get_parameters | python | def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile("__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters | ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax. | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L105-L180 | [
"def get_enumeration(rq, v, endpoint, metadata={}, auth=None):\n \"\"\"\n Returns a list of enumerated values for variable 'v' in query 'rq'\n \"\"\"\n # glogger.debug(\"Metadata before processing enums: {}\".format(metadata))\n # We only fire the enum filling queries if indicated by the query metada... | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
    """Guess the SPARQL endpoint URI for a query.

    Resolution order (highest precedence first):
    1. An ``endpoint`` parameter in the request URL.
    2. An ``#+ endpoint:`` decorator inside the query.
    3. An ``endpoint.txt`` file in the repository.
    4. The configured default endpoint.

    Parameters
    ----------
    rq : str or dict
        Raw query text (or SPARQL Transformer JSON) possibly carrying decorators.
    gh_repo :
        Repository loader exposing ``getTextFor``.

    Returns
    -------
    tuple
        ``(endpoint, auth)``; ``auth`` is a ``(user, password)`` tuple or
        ``None`` when no credentials are configured.
    """
    auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
    if auth == ('none', 'none'):
        auth = None

    # Highest precedence: endpoint explicitly given in the HTTP request.
    if has_request_context() and "endpoint" in request.args:
        endpoint = request.args['endpoint']
        glogger.info("Endpoint provided in request: " + endpoint)
        return endpoint, auth

    # Next: an #+endpoint decorator inside the query.
    try:
        decorators = get_yaml_decorators(rq)
        endpoint = decorators['endpoint']
        auth = None
        glogger.info("Decorator guessed endpoint: " + endpoint)
    except (TypeError, KeyError):
        # Next: an endpoint.txt file in the repository.
        try:
            endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
            endpoint = endpoint_content.strip().splitlines()[0]
            auth = None
            glogger.info("File guessed endpoint: " + endpoint)
        except Exception:
            # Narrowed from a bare `except:` (flagged by the old TODO) so
            # KeyboardInterrupt / SystemExit are no longer swallowed.
            # Fall back to the configured default endpoint.
            endpoint = static.DEFAULT_ENDPOINT
            auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
            if auth == ('none', 'none'):
                auth = None
            glogger.warning("No endpoint specified, using default ({})".format(endpoint))

    return endpoint, auth
def count_query_results(query, endpoint):
    """Return the total number of results that `query` would produce.

    Computing a real count is far too expensive just to report the number
    of result pages, so a fixed dummy value is returned instead.
    """
    # A genuine implementation would rewrite the query into a
    # SELECT COUNT(*) form (stripping GROUP BY / ORDER BY / LIMIT /
    # OFFSET), send it to `endpoint`, and parse the single binding out of
    # the JSON response. That code was disabled for performance reasons.
    return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_defaults(rq, v, metadata):
    """Return the default value declared for parameter *v*, or None.

    Defaults come from the ``#+ defaults:`` decorator, which yields a list
    of single-key dicts under ``metadata['defaults']``. *rq* is unused but
    kept for signature symmetry with the other parameter helpers.
    """
    glogger.debug("Metadata with defaults: {}".format(metadata))
    if 'defaults' not in metadata:
        return None
    match = _getDictWithKey(v, metadata['defaults'])
    return match[v] if match else None
def get_enumeration(rq, v, endpoint, metadata=None, auth=None):
    """Return the list of enumerated values for variable *v* in query *rq*.

    Enumeration queries are only fired when requested via the
    ``#+ enumerate:`` decorator. Entries under ``metadata['enumerate']``
    are either single-key dicts carrying an explicit value list, or bare
    variable names (values are then fetched from the SPARQL endpoint).

    Returns None when no enumeration applies to *v*.
    """
    # `metadata` used to default to a mutable `{}`; a None sentinel avoids
    # the shared-mutable-default pitfall (backward compatible for callers).
    if metadata is None:
        metadata = {}
    # We only fire the enum filling queries if indicated by the query metadata
    if 'enumerate' not in metadata:
        return None

    # Explicit values given in the decorator, e.g. ``- var: [a, b]``.
    enumDict = _getDictWithKey(v, metadata['enumerate'])
    if enumDict:
        return enumDict[v]

    # Bare variable name: derive the values by querying the endpoint.
    if v in metadata['enumerate']:
        return get_enumeration_sparql(rq, v, endpoint, auth)

    return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq'.

    The optional FROM graph names and the brace-delimited query body are
    extracted from `rq` with a regex, wrapped into a SELECT DISTINCT
    subquery for ?v, and sent to `endpoint`; the bound values are
    collected and returned. Returns an empty list when the query does not
    match the expected shape.
    """
    glogger.info('Retrieving enumeration for variable {}'.format(v))
    vcodes = []
    # Earlier, stricter patterns kept for reference:
    # tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
    # tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)
    # WHERE is optional too!!
    # Capture the optional FROM <graph...> clause and the body between the
    # outermost braces; DOTALL lets '.' span newlines.
    tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
                                  flags=re.DOTALL)
    glogger.debug(rq)
    tp_match = tpattern_matcher.match(rq)
    if tp_match:
        vtpattern = tp_match.group('tpattern')
        gnames = tp_match.group('gnames')
        glogger.debug("Detected graph names: {}".format(gnames))
        glogger.debug("Detected BGP: {}".format(vtpattern))
        glogger.debug("Matched triple pattern with parameter")
        # Rebuild the query as SELECT DISTINCT over the same pattern,
        # preserving the FROM clause when one was present.
        if gnames:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        else:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        glogger.debug("Codes subquery: {}".format(codes_subquery))
        glogger.debug(endpoint)
        # NOTE(review): this sends the configured ACCESS_TOKEN as an
        # Authorization header to the SPARQL endpoint -- confirm this is
        # intended and not a credential leak to third-party endpoints.
        codes_json = requests.get(endpoint, params={'query': codes_subquery},
                                  headers={'Accept': static.mimetypes['json'],
                                           'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
        for code in codes_json['results']['bindings']:
            # Each binding row contains a single variable; take its value.
            vcodes.append(list(code.values())[0]["value"])
    else:
        glogger.debug("No match between variable name and query.")
    return vcodes
def get_yaml_decorators(rq):
    """
    Returns the yaml decorator metadata only (this is needed by triple
    pattern fragments).

    For a JSON (SPARQL Transformer) query the decorators live under the
    'grlc' key; for a classic query they are the '#+'-prefixed lines.
    The returned dict always carries the decorator-free query under
    'query'. Returns None when `rq` is empty.
    """
    if not rq:
        return None

    if isinstance(rq, dict) and 'grlc' in rq:  # json query (sparql transformer)
        yaml_string = rq['grlc']
        query_string = rq
    else:  # classic query: split '#+' decorator lines from the query body
        yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
        query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])

    query_metadata = None
    if isinstance(yaml_string, dict):
        query_metadata = yaml_string
    elif isinstance(yaml_string, str):
        try:  # Invalid YAMLs will produce empty metadata
            # safe_load instead of the unsafe full loader: decorators come
            # from user-supplied repositories and must not be able to
            # construct arbitrary Python objects.
            query_metadata = yaml.safe_load(yaml_string)
        except yaml.YAMLError:
            # YAMLError covers ParserError/ScannerError and any other
            # loading failure; fall back to interpreting the text as JSON.
            try:
                query_metadata = json.loads(yaml_string)
            except json.JSONDecodeError:
                glogger.warning("Query decorators could not be parsed; check your YAML syntax")

    # If there is no YAML string
    if query_metadata is None:
        query_metadata = {}
    query_metadata['query'] = query_string

    return query_metadata
def enable_custom_function_prefix(rq, prefix):
    """Prepend a dummy PREFIX declaration for *prefix* when the query uses it.

    Some endpoints (e.g. Virtuoso's ``bif:`` / ``sql:``) accept namespaced
    built-in functions that would otherwise fail SPARQL parsing; declaring
    ``PREFIX bif: <:bif>`` keeps the parser happy.

    Bug fix: the original condition read ``A or B and not C`` which, by
    operator precedence, parses as ``A or (B and not C)`` -- so a query
    that already declared the prefix was re-declared whenever
    ``' prefix:'`` occurred anywhere (including in the existing PREFIX
    line itself). The intended logic is ``(A or B) and not C``.
    """
    uses_prefix = (' %s:' % prefix in rq) or ('(%s:' % prefix in rq)
    already_declared = 'PREFIX %s:' % prefix in rq
    if uses_prefix and not already_declared:
        rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
    return rq
def get_metadata(rq, endpoint):
    """
    Returns the metadata 'exp' parsed from the raw query file 'rq'
    'exp' is one of: 'endpoint', 'tags', 'summary', 'request',
    'pagination', 'enumerate'.

    The query is first parsed as SELECT/CONSTRUCT/DESCRIBE/ASK; when that
    fails, an UPDATE (e.g. INSERT DATA) parse is attempted. The returned
    dict always carries 'type', 'query' and 'original_query', plus
    'variables'/'parameters' where the query type supports them.
    """
    query_metadata = get_yaml_decorators(rq)
    query_metadata['type'] = 'UNKNOWN'
    query_metadata['original_query'] = rq

    if isinstance(rq, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(rq)
        rq = rq.strip()
        query_metadata['proto'] = proto
        query_metadata['opt'] = opt
        query_metadata['query'] = rq

    # Virtuoso-style custom function namespaces need a dummy PREFIX so the
    # rdflib parser accepts them.
    rq = enable_custom_function_prefix(rq, 'bif')
    rq = enable_custom_function_prefix(rq, 'sql')

    try:
        # THE PARSING: select, describe, construct, ask
        parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
        query_metadata['type'] = parsed_query.algebra.name
        if query_metadata['type'] == 'SelectQuery':
            # Projection variables
            query_metadata['variables'] = parsed_query.algebra['PV']
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        elif query_metadata['type'] == 'ConstructQuery':
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        else:
            glogger.warning(
                "Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
    except ParseException as pe:
        glogger.warning(pe)
        glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")
        try:
            # update query (e.g. INSERT DATA)
            glogger.info("Trying to parse UPDATE query")
            parsed_query = UpdateUnit.parseString(rq, parseAll=True)
            glogger.info(parsed_query)
            query_metadata['type'] = parsed_query[0]['request'][0].name
            if query_metadata['type'] == 'InsertData':
                # INSERT DATA takes a fixed pair of parameters: the target
                # graph IRI and the payload literal.
                query_metadata['parameters'] = {
                    'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
                          'required': True, 'type': 'iri'},
                    'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
                             'required': True, 'type': 'literal'}}
            glogger.info("Update query parsed with {}".format(query_metadata['type']))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # propagate. Also fixed: the original logged
            # `traceback.print_exc()`, which prints to stderr and returns
            # None -- so the log line literally read "None".
            glogger.error("Could not parse query")
            glogger.error(query_metadata['query'])
            glogger.error(traceback.format_exc())

    glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
    glogger.debug("All parsed query metadata (from decorators and content): ")
    glogger.debug(pformat(query_metadata, indent=32))

    return query_metadata
def paginate_query(query, results_per_page, get_args):
    """Rewrite *query* so it returns only the requested result page.

    Any LIMIT/OFFSET clauses already present are stripped, then a fresh
    ``LIMIT results_per_page OFFSET (page-1)*results_per_page`` is
    appended. The page number comes from ``get_args['page']`` (default 1).
    """
    page = get_args.get('page', 1)
    glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))
    glogger.debug("Original query: " + query)

    # Drop any LIMIT/OFFSET the query already carries.
    stripped_query = re.sub(r"((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
    glogger.debug("No limit query: " + stripped_query)

    # Append the pagination window for the requested page.
    offset = (int(page) - 1) * results_per_page
    paginated_query = stripped_query + " LIMIT {} OFFSET {}".format(results_per_page, offset)
    glogger.debug("Paginated query: " + paginated_query)
    return paginated_query
def rewrite_query(query, parameters, get_args):
    """Substitute request-supplied parameter values into *query*.

    `query` is either a SPARQL string or a SPARQL Transformer JSON dict;
    `parameters` is the descriptor dict produced by get_parameters();
    `get_args` holds the HTTP request arguments.

    Returns the rewritten SPARQL string (JSON queries are pre-processed
    into SPARQL at the end). Raises AssertionError when a required
    parameter is missing from the request.
    """
    glogger.debug("Query parameters")
    glogger.debug(parameters)

    requireXSD = False

    # Collect the descriptors of the required parameters.
    required_params = {}
    for k, v in parameters.items():
        if parameters[k]['required']:
            required_params[k] = v

    requiredParams = set(required_params.keys())
    providedParams = set(get_args.keys())
    glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
    # NOTE(review): input validation via `assert` is stripped under
    # `python -O`; raising an explicit exception would be safer -- confirm
    # whether callers rely on AssertionError before changing.
    assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'

    for pname, p in list(parameters.items()):
        # Get the parameter value from the GET request
        v = get_args.get(pname, None)
        # Skip parameters the request did not supply (or supplied empty).
        if not v:
            continue

        if isinstance(query, dict):  # json query (sparql transformer)
            # Accumulate value(s) under $values; repeated values for the
            # same variable are collected into a list.
            if '$values' not in query:
                query['$values'] = {}
            values = query['$values']

            if not p['original'] in values:
                values[p['original']] = v
            elif isinstance(values[p['original']], list):
                values[p['original']].append(v)
            else:
                values[p['original']] = [values[p['original']], v]
            continue

        # IRI
        if p['type'] == 'iri':  # TODO: never reached anymore, since iris are now type=string with format=iri
            query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
        # A number (without a datatype)
        elif p['type'] == 'number':
            query = query.replace(p['original'], v)
        # Literals
        elif p['type'] == 'literal' or p['type'] == 'string':
            # If it's a iri
            if 'format' in p and p['format'] == 'iri':
                query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
            # If there is a language tag
            # (after the iri substitution above these replaces are no-ops,
            # since p['original'] no longer occurs in the query)
            if 'lang' in p and p['lang']:
                query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
            elif 'datatype' in p and p['datatype']:
                query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
                if 'xsd' in p['datatype']:
                    # An xsd-typed literal was injected; make sure the xsd
                    # prefix gets declared below.
                    requireXSD = True
            else:
                query = query.replace(p['original'], "\"{}\"".format(v))

    if isinstance(query, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(query)
        query = rq.strip()

    if requireXSD and XSD_PREFIX not in query:
        # Declare the xsd prefix ahead of the (first) SELECT keyword.
        query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')

    glogger.debug("Query rewritten as: " + query)
    return query
|
CLARIAH/grlc | src/gquery.py | get_defaults | python | def get_defaults(rq, v, metadata):
glogger.debug("Metadata with defaults: {}".format(metadata))
if 'defaults' not in metadata:
return None
defaultsDict = _getDictWithKey(v, metadata['defaults'])
if defaultsDict:
return defaultsDict[v]
return None | Returns the default value for a parameter or None | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L183-L193 | [
"def _getDictWithKey(key, dict_list):\n \"\"\" Returns the first dictionary in dict_list which contains the given key\"\"\"\n for d in dict_list:\n if key in d:\n return d\n return None\n"
] | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
"""
Guesses the endpoint URI from (in this order):
- An endpoint parameter in URL
- An #+endpoint decorator
- A endpoint.txt file in the repo
Otherwise assigns a default one
"""
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
if has_request_context() and "endpoint" in request.args:
endpoint = request.args['endpoint']
glogger.info("Endpoint provided in request: " + endpoint)
return endpoint, auth
# Decorator
try:
decorators = get_yaml_decorators(rq)
endpoint = decorators['endpoint']
auth = None
glogger.info("Decorator guessed endpoint: " + endpoint)
except (TypeError, KeyError):
# File
try:
endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
endpoint = endpoint_content.strip().splitlines()[0]
auth = None
glogger.info("File guessed endpoint: " + endpoint)
# TODO: except all is really ugly
except:
# Default
endpoint = static.DEFAULT_ENDPOINT
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
glogger.warning("No endpoint specified, using default ({})".format(endpoint))
return endpoint, auth
def count_query_results(query, endpoint):
"""
Returns the total number of results that query 'query' will generate
WARNING: This is too expensive just for providing a number of result pages
Providing a dummy count for now
"""
# number_results_query, repl = re.subn("SELECT.*FROM", "SELECT COUNT (*) FROM", query)
# if not repl:
# number_results_query = re.sub("SELECT.*{", "SELECT COUNT(*) {", query)
# number_results_query = re.sub("GROUP\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("ORDER\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("LIMIT\s+[0-9]+", "", number_results_query)
# number_results_query = re.sub("OFFSET\s+[0-9]+", "", number_results_query)
#
# glogger.debug("Query for result count: " + number_results_query)
#
# # Preapre HTTP request
# headers = { 'Accept' : 'application/json' }
# data = { 'query' : number_results_query }
# count_json = requests.get(endpoint, params=data, headers=headers).json()
# count = int(count_json['results']['bindings'][0]['callret-0']['value'])
# glogger.info("Paginated query has {} results in total".format(count))
#
# return count
return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile("__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters
def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
"""
Returns a list of enumerated values for variable 'v' in query 'rq'
"""
# glogger.debug("Metadata before processing enums: {}".format(metadata))
# We only fire the enum filling queries if indicated by the query metadata
if 'enumerate' not in metadata:
return None
enumDict = _getDictWithKey(v, metadata['enumerate'])
if enumDict:
return enumDict[v]
if v in metadata['enumerate']:
return get_enumeration_sparql(rq, v, endpoint, auth)
return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
"""
Returns a list of enumerated values for variable 'v' in query 'rq'
"""
glogger.info('Retrieving enumeration for variable {}'.format(v))
vcodes = []
# tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
# tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)
# WHERE is optional too!!
tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
flags=re.DOTALL)
glogger.debug(rq)
tp_match = tpattern_matcher.match(rq)
if tp_match:
vtpattern = tp_match.group('tpattern')
gnames = tp_match.group('gnames')
glogger.debug("Detected graph names: {}".format(gnames))
glogger.debug("Detected BGP: {}".format(vtpattern))
glogger.debug("Matched triple pattern with parameter")
if gnames:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
else:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
glogger.debug("Codes subquery: {}".format(codes_subquery))
glogger.debug(endpoint)
codes_json = requests.get(endpoint, params={'query': codes_subquery},
headers={'Accept': static.mimetypes['json'],
'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
for code in codes_json['results']['bindings']:
vcodes.append(list(code.values())[0]["value"])
else:
glogger.debug("No match between variable name and query.")
return vcodes
def get_yaml_decorators(rq):
"""
Returns the yaml decorator metadata only (this is needed by triple pattern fragments)
"""
# glogger.debug('Guessing decorators for query {}'.format(rq))
if not rq:
return None
if isinstance(rq, dict) and 'grlc' in rq: # json query (sparql transformer)
yaml_string = rq['grlc']
query_string = rq
else: # classic query
yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])
query_metadata = None
if type(yaml_string) == dict:
query_metadata = yaml_string
elif type(yaml_string) == str:
try: # Invalid YAMLs will produce empty metadata
query_metadata = yaml.load(yaml_string)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
try:
query_metadata = json.loads(yaml_string)
except json.JSONDecodeError:
glogger.warning("Query decorators could not be parsed; check your YAML syntax")
# If there is no YAML string
if query_metadata is None:
query_metadata = {}
query_metadata['query'] = query_string
# glogger.debug("Parsed query decorators: {}".format(query_metadata))
return query_metadata
def enable_custom_function_prefix(rq, prefix):
if ' %s:' % prefix in rq or '(%s:' % prefix in rq and not 'PREFIX %s:' % prefix in rq:
rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
return rq
def get_metadata(rq, endpoint):
"""
Returns the metadata 'exp' parsed from the raw query file 'rq'
'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'
"""
query_metadata = get_yaml_decorators(rq)
query_metadata['type'] = 'UNKNOWN'
query_metadata['original_query'] = rq
if isinstance(rq, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(rq)
rq = rq.strip()
query_metadata['proto'] = proto
query_metadata['opt'] = opt
query_metadata['query'] = rq
rq = enable_custom_function_prefix(rq, 'bif')
rq = enable_custom_function_prefix(rq, 'sql')
try:
# THE PARSING
# select, describe, construct, ask
parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
query_metadata['type'] = parsed_query.algebra.name
if query_metadata['type'] == 'SelectQuery':
# Projection variables
query_metadata['variables'] = parsed_query.algebra['PV']
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
elif query_metadata['type'] == 'ConstructQuery':
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
else:
glogger.warning(
"Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
except ParseException as pe:
glogger.warning(pe)
glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")
# glogger.warning(traceback.print_exc())
# insert queries won't parse, so we regex
# glogger.info("Trying to parse INSERT query")
# if static.INSERT_PATTERN in rq:
# query_metadata['type'] = 'InsertQuery'
# query_metadata['parameters'] = [u'_g_iri']
try:
# update query
glogger.info("Trying to parse UPDATE query")
parsed_query = UpdateUnit.parseString(rq, parseAll=True)
glogger.info(parsed_query)
query_metadata['type'] = parsed_query[0]['request'][0].name
if query_metadata['type'] == 'InsertData':
query_metadata['parameters'] = {
'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
'required': True, 'type': 'iri'},
'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
'required': True, 'type': 'literal'}}
glogger.info("Update query parsed with {}".format(query_metadata['type']))
# if query_metadata['type'] == 'InsertData':
# query_metadata['variables'] = parsed_query.algebra['PV']
except:
glogger.error("Could not parse query")
glogger.error(query_metadata['query'])
glogger.error(traceback.print_exc())
pass
glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
glogger.debug("All parsed query metadata (from decorators and content): ")
glogger.debug(pformat(query_metadata, indent=32))
return query_metadata
def paginate_query(query, results_per_page, get_args):
page = get_args.get('page', 1)
glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))
# If contains LIMIT or OFFSET, remove them
glogger.debug("Original query: " + query)
no_limit_query = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
glogger.debug("No limit query: " + no_limit_query)
# Append LIMIT results_per_page OFFSET (page-1)*results_per_page
paginated_query = no_limit_query + " LIMIT {} OFFSET {}".format(results_per_page,
(int(page) - 1) * results_per_page)
glogger.debug("Paginated query: " + paginated_query)
return paginated_query
def rewrite_query(query, parameters, get_args):
glogger.debug("Query parameters")
glogger.debug(parameters)
requireXSD = False
required_params = {}
for k, v in parameters.items():
if parameters[k]['required']:
required_params[k] = v
requiredParams = set(required_params.keys())
providedParams = set(get_args.keys())
glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'
for pname, p in list(parameters.items()):
# Get the parameter value from the GET request
v = get_args.get(pname, None)
# If the parameter has a value
if not v:
continue
if isinstance(query, dict): # json query (sparql transformer)
if '$values' not in query:
query['$values'] = {}
values = query['$values']
if not p['original'] in values:
values[p['original']] = v
elif isinstance(values[p['original']], list):
values[p['original']].append(v)
else:
values[p['original']] = [values[p['original']], v]
continue
# IRI
if p['type'] == 'iri': # TODO: never reached anymore, since iris are now type=string with format=iri
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# A number (without a datatype)
elif p['type'] == 'number':
query = query.replace(p['original'], v)
# Literals
elif p['type'] == 'literal' or p['type'] == 'string':
# If it's a iri
if 'format' in p and p['format'] == 'iri':
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# If there is a language tag
if 'lang' in p and p['lang']:
query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
elif 'datatype' in p and p['datatype']:
query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
if 'xsd' in p['datatype']:
requireXSD = True
else:
query = query.replace(p['original'], "\"{}\"".format(v))
if isinstance(query, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(query)
query = rq.strip()
if requireXSD and XSD_PREFIX not in query:
query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')
glogger.debug("Query rewritten as: " + query)
return query
|
CLARIAH/grlc | src/gquery.py | get_enumeration | python | def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
# glogger.debug("Metadata before processing enums: {}".format(metadata))
# We only fire the enum filling queries if indicated by the query metadata
if 'enumerate' not in metadata:
return None
enumDict = _getDictWithKey(v, metadata['enumerate'])
if enumDict:
return enumDict[v]
if v in metadata['enumerate']:
return get_enumeration_sparql(rq, v, endpoint, auth)
return None | Returns a list of enumerated values for variable 'v' in query 'rq' | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L196-L209 | [
"def _getDictWithKey(key, dict_list):\n \"\"\" Returns the first dictionary in dict_list which contains the given key\"\"\"\n for d in dict_list:\n if key in d:\n return d\n return None\n",
"def get_enumeration_sparql(rq, v, endpoint, auth=None):\n \"\"\"\n Returns a list of enume... | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
"""
Guesses the endpoint URI from (in this order):
- An endpoint parameter in URL
- An #+endpoint decorator
- A endpoint.txt file in the repo
Otherwise assigns a default one
"""
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
if has_request_context() and "endpoint" in request.args:
endpoint = request.args['endpoint']
glogger.info("Endpoint provided in request: " + endpoint)
return endpoint, auth
# Decorator
try:
decorators = get_yaml_decorators(rq)
endpoint = decorators['endpoint']
auth = None
glogger.info("Decorator guessed endpoint: " + endpoint)
except (TypeError, KeyError):
# File
try:
endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
endpoint = endpoint_content.strip().splitlines()[0]
auth = None
glogger.info("File guessed endpoint: " + endpoint)
# TODO: except all is really ugly
except:
# Default
endpoint = static.DEFAULT_ENDPOINT
auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
if auth == ('none', 'none'):
auth = None
glogger.warning("No endpoint specified, using default ({})".format(endpoint))
return endpoint, auth
def count_query_results(query, endpoint):
"""
Returns the total number of results that query 'query' will generate
WARNING: This is too expensive just for providing a number of result pages
Providing a dummy count for now
"""
# number_results_query, repl = re.subn("SELECT.*FROM", "SELECT COUNT (*) FROM", query)
# if not repl:
# number_results_query = re.sub("SELECT.*{", "SELECT COUNT(*) {", query)
# number_results_query = re.sub("GROUP\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("ORDER\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("LIMIT\s+[0-9]+", "", number_results_query)
# number_results_query = re.sub("OFFSET\s+[0-9]+", "", number_results_query)
#
# glogger.debug("Query for result count: " + number_results_query)
#
# # Preapre HTTP request
# headers = { 'Accept' : 'application/json' }
# data = { 'query' : number_results_query }
# count_json = requests.get(endpoint, params=data, headers=headers).json()
# count = int(count_json['results']['bindings'][0]['callret-0']['value'])
# glogger.info("Paginated query has {} results in total".format(count))
#
# return count
return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
    """
    Build the grlc parameter descriptions for the BASIL-style variables
    found in query 'rq'.

    Recognized variable name patterns:
    ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
    ?__name The parameter name is optional.
    ?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
    ?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
    ?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
    ?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.

    Returns a dict keyed by parameter name; each value carries at least
    'original', 'required', 'name' and 'type', plus optional 'enum', 'lang',
    'datatype', 'format' and 'default' entries.
    """
    # variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
    ## Aggregates: internal variables introduced by the SPARQL algebra
    ## (e.g. '__agg_1__') are not user-facing parameters and are skipped.
    internal_matcher = re.compile("__agg_\d+__")
    ## Basil-style variables: one or two leading underscores (required vs
    ## optional), the parameter name, then optional type / language /
    ## user-defined datatype segments separated by underscores.
    variable_matcher = re.compile(
        "(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
    parameters = {}
    for v in variables:
        if internal_matcher.match(v):
            continue
        match = variable_matcher.match(v)
        # TODO: currently only one parameter per triple pattern is supported
        if match:
            vname = match.group('name')
            # A single underscore marks a mandatory parameter, two an optional one
            vrequired = True if match.group('required') == '_' else False
            vtype = 'string'
            # All these can be None
            vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
            vdefault = get_defaults(rq, vname, query_metadata)
            vlang = None
            vdatatype = None
            vformat = None
            mtype = match.group('type')
            muserdefined = match.group('userdefined')
            if mtype in ['number', 'literal', 'string']:
                vtype = mtype
            elif mtype in ['iri']:  # TODO: proper form validation of input parameter uris
                vtype = 'string'
                vformat = 'iri'
            elif mtype:
                vtype = 'string'
                # Disambiguate the suffix, in this order: known XSD datatype,
                # 2-letter language tag, or a user-defined 'prefix_datatype' pair.
                if mtype in static.XSD_DATATYPES:
                    vdatatype = 'xsd:{}'.format(mtype)
                elif len(mtype) == 2:
                    vlang = mtype
                elif muserdefined:
                    vdatatype = '{}:{}'.format(mtype, muserdefined)
            parameters[vname] = {
                'original': '?{}'.format(v),
                'required': vrequired,
                'name': vname,
                'type': vtype
            }
            # Possibly None parameter attributes
            if vcodes is not None:
                parameters[vname]['enum'] = sorted(vcodes)
            if vlang is not None:
                parameters[vname]['lang'] = vlang
            if vdatatype is not None:
                parameters[vname]['datatype'] = vdatatype
            if vformat is not None:
                parameters[vname]['format'] = vformat
            if vdefault is not None:
                parameters[vname]['default'] = vdefault
    glogger.info('Finished parsing the following parameters: {}'.format(parameters))
    return parameters
def get_defaults(rq, v, metadata):
    """Look up the declared default value for parameter 'v' in the query
    decorator metadata; return None when no default is declared."""
    glogger.debug("Metadata with defaults: {}".format(metadata))
    if 'defaults' in metadata:
        entry = _getDictWithKey(v, metadata['defaults'])
        if entry:
            return entry[v]
    return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq'.

    The query's graph pattern (and FROM graph names, when present) is
    extracted with a regex and wrapped into a 'SELECT DISTINCT ?v' subquery
    that is fired at 'endpoint'; the bound values become the enumeration.
    Returns an empty list when the pattern cannot be matched or the endpoint
    yields no bindings.
    """
    glogger.info('Retrieving enumeration for variable {}'.format(v))
    vcodes = []
    # Earlier attempts at the extraction regex, kept for reference:
    # tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
    # tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)
    # WHERE is optional too!!
    tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
                                  flags=re.DOTALL)
    glogger.debug(rq)
    tp_match = tpattern_matcher.match(rq)
    if tp_match:
        vtpattern = tp_match.group('tpattern')
        gnames = tp_match.group('gnames')
        glogger.debug("Detected graph names: {}".format(gnames))
        glogger.debug("Detected BGP: {}".format(vtpattern))
        glogger.debug("Matched triple pattern with parameter")
        # Rebuild the query as a SELECT DISTINCT over just this variable,
        # preserving the FROM clause when one was detected.
        if gnames:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        else:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        glogger.debug("Codes subquery: {}".format(codes_subquery))
        glogger.debug(endpoint)
        # NOTE(review): this sends 'Authorization: token <static.ACCESS_TOKEN>'
        # (which looks like the GitHub access token) to the SPARQL endpoint;
        # presumably only the 'auth' basic-auth pair is meant for the endpoint
        # — confirm whether this header belongs here at all.
        codes_json = requests.get(endpoint, params={'query': codes_subquery},
                                  headers={'Accept': static.mimetypes['json'],
                                           'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
        # Each binding has a single variable; collect its bound value
        for code in codes_json['results']['bindings']:
            vcodes.append(list(code.values())[0]["value"])
    else:
        glogger.debug("No match between variable name and query.")
    return vcodes
def get_yaml_decorators(rq):
    """
    Returns the yaml decorator metadata only (this is needed by triple pattern fragments)

    'rq' is either a classic SPARQL query string (decorators live in '#+'
    comment lines) or a SPARQL-transformer JSON dict (decorators under the
    'grlc' key). Returns a dict with one entry per decorator plus 'query'
    (the query with decorator lines stripped), or None if 'rq' is empty.
    """
    if not rq:
        return None
    if isinstance(rq, dict) and 'grlc' in rq:  # json query (sparql transformer)
        yaml_string = rq['grlc']
        query_string = rq
    else:  # classic query: decorators are the '#+' comment lines
        yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
        query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])
    query_metadata = None
    if type(yaml_string) == dict:
        query_metadata = yaml_string
    elif type(yaml_string) == str:
        try:  # Invalid YAMLs will produce empty metadata
            # safe_load: decorators come from arbitrary (untrusted) repos, so
            # full yaml.load (which can construct arbitrary Python objects)
            # must not be used here; plain decorator maps parse identically.
            query_metadata = yaml.safe_load(yaml_string)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
            # Some decorator blocks are JSON rather than YAML; try that next
            try:
                query_metadata = json.loads(yaml_string)
            except json.JSONDecodeError:
                glogger.warning("Query decorators could not be parsed; check your YAML syntax")
    # If there is no YAML string
    if query_metadata is None:
        query_metadata = {}
    query_metadata['query'] = query_string
    return query_metadata
def enable_custom_function_prefix(rq, prefix):
    """
    Prepend a dummy 'PREFIX prefix: <:prefix>' declaration when query 'rq'
    uses the vendor-specific function namespace 'prefix' (e.g. 'bif', 'sql')
    without declaring it, so the query still parses. Returns 'rq' unchanged
    otherwise.
    """
    # Fix: the original condition 'A or B and not C' parsed as
    # 'A or (B and not C)' due to operator precedence, so a duplicate PREFIX
    # was prepended whenever ' prefix:' appeared in an already-declared query.
    # The intended condition is '(A or B) and not C'.
    uses_prefix = (' %s:' % prefix in rq) or ('(%s:' % prefix in rq)
    declares_prefix = 'PREFIX %s:' % prefix in rq
    if uses_prefix and not declares_prefix:
        rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
    return rq
def get_metadata(rq, endpoint):
    """
    Returns the metadata 'exp' parsed from the raw query file 'rq'
    'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'

    The query is first parsed as a regular SELECT / CONSTRUCT / DESCRIBE /
    ASK query; failing that, as a SPARQL UPDATE. 'type' (and, for supported
    types, 'variables' and 'parameters') are filled in accordingly; 'type'
    stays 'UNKNOWN' when nothing parses.
    """
    query_metadata = get_yaml_decorators(rq)
    query_metadata['type'] = 'UNKNOWN'
    query_metadata['original_query'] = rq
    if isinstance(rq, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(rq)
        rq = rq.strip()
        query_metadata['proto'] = proto
        query_metadata['opt'] = opt
        query_metadata['query'] = rq
    # Vendor-specific function namespaces (Virtuoso bif:/sql:) need a dummy
    # PREFIX declaration so rdflib can parse the query
    rq = enable_custom_function_prefix(rq, 'bif')
    rq = enable_custom_function_prefix(rq, 'sql')
    try:
        # THE PARSING
        # select, describe, construct, ask
        parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
        query_metadata['type'] = parsed_query.algebra.name
        if query_metadata['type'] == 'SelectQuery':
            # Projection variables
            query_metadata['variables'] = parsed_query.algebra['PV']
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        elif query_metadata['type'] == 'ConstructQuery':
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        else:
            glogger.warning(
                "Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
    except ParseException as pe:
        glogger.warning(pe)
        glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")
        try:
            # update query
            glogger.info("Trying to parse UPDATE query")
            parsed_query = UpdateUnit.parseString(rq, parseAll=True)
            glogger.info(parsed_query)
            query_metadata['type'] = parsed_query[0]['request'][0].name
            if query_metadata['type'] == 'InsertData':
                # INSERT DATA takes two fixed parameters: target graph and payload
                query_metadata['parameters'] = {
                    'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
                          'required': True, 'type': 'iri'},
                    'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
                             'required': True, 'type': 'literal'}}
            glogger.info("Update query parsed with {}".format(query_metadata['type']))
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are not swallowed while giving up on the query.
            glogger.error("Could not parse query")
            glogger.error(query_metadata['query'])
            # Fix: traceback.print_exc() prints and returns None, so the old
            # glogger.error(traceback.print_exc()) logged the string 'None';
            # format_exc() returns the traceback text for logging.
            glogger.error(traceback.format_exc())
    glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
    glogger.debug("All parsed query metadata (from decorators and content): ")
    glogger.debug(pformat(query_metadata, indent=32))
    return query_metadata
def paginate_query(query, results_per_page, get_args):
    """Rewrite 'query' to return only the requested page: strip any existing
    LIMIT/OFFSET clauses and append fresh ones derived from the 'page' GET
    argument (1-based; defaults to page 1)."""
    page = get_args.get('page', 1)
    glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))
    glogger.debug("Original query: " + query)
    # Drop any LIMIT/OFFSET already present so the appended ones are authoritative
    stripped = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
    glogger.debug("No limit query: " + stripped)
    offset = (int(page) - 1) * results_per_page
    paginated = stripped + " LIMIT {} OFFSET {}".format(results_per_page, offset)
    glogger.debug("Paginated query: " + paginated)
    return paginated
def rewrite_query(query, parameters, get_args):
    """
    Substitute the BASIL-style placeholders of 'query' with the values
    supplied in the GET arguments 'get_args'.

    'parameters' is the description dict produced by get_parameters(). For a
    classic SPARQL string the placeholders are textually replaced (as IRI,
    plain number, or literal with optional language tag / datatype); for a
    JSON (sparql-transformer) query the values are collected under '$values'
    and the query is pre-processed afterwards.

    Raises AssertionError when a required parameter is missing from 'get_args'.
    """
    glogger.debug("Query parameters")
    glogger.debug(parameters)
    # Set to True when an xsd: datatype is substituted, so the xsd PREFIX
    # declaration can be prepended at the end.
    requireXSD = False
    required_params = {}
    for k, v in parameters.items():
        if parameters[k]['required']:
            required_params[k] = v
    requiredParams = set(required_params.keys())
    providedParams = set(get_args.keys())
    glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
    assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'
    for pname, p in list(parameters.items()):
        # Get the parameter value from the GET request
        v = get_args.get(pname, None)
        # If the parameter has a value
        if not v:
            continue
        if isinstance(query, dict):  # json query (sparql transformer)
            # Accumulate values under '$values'; a repeated parameter becomes a list
            if '$values' not in query:
                query['$values'] = {}
            values = query['$values']
            if not p['original'] in values:
                values[p['original']] = v
            elif isinstance(values[p['original']], list):
                values[p['original']].append(v)
            else:
                values[p['original']] = [values[p['original']], v]
            continue
        # IRI
        if p['type'] == 'iri':  # TODO: never reached anymore, since iris are now type=string with format=iri
            query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
        # A number (without a datatype)
        elif p['type'] == 'number':
            query = query.replace(p['original'], v)
        # Literals
        elif p['type'] == 'literal' or p['type'] == 'string':
            # If it's a iri
            if 'format' in p and p['format'] == 'iri':
                query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
            # If there is a language tag
            if 'lang' in p and p['lang']:
                query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
            elif 'datatype' in p and p['datatype']:
                query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
                if 'xsd' in p['datatype']:
                    requireXSD = True
            else:
                # Plain literal (a no-op when the iri branch above already substituted)
                query = query.replace(p['original'], "\"{}\"".format(v))
    if isinstance(query, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(query)
        query = rq.strip()
    if requireXSD and XSD_PREFIX not in query:
        # Prepend the xsd: prefix declaration before the first SELECT keyword
        query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')
    glogger.debug("Query rewritten as: " + query)
    return query
|
CLARIAH/grlc | src/gquery.py | get_enumeration_sparql | python | def get_enumeration_sparql(rq, v, endpoint, auth=None):
glogger.info('Retrieving enumeration for variable {}'.format(v))
vcodes = []
# tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
# tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)
# WHERE is optional too!!
tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
flags=re.DOTALL)
glogger.debug(rq)
tp_match = tpattern_matcher.match(rq)
if tp_match:
vtpattern = tp_match.group('tpattern')
gnames = tp_match.group('gnames')
glogger.debug("Detected graph names: {}".format(gnames))
glogger.debug("Detected BGP: {}".format(vtpattern))
glogger.debug("Matched triple pattern with parameter")
if gnames:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
else:
codes_subquery = re.sub("SELECT.*\{.*\}.*",
"SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
flags=re.DOTALL)
glogger.debug("Codes subquery: {}".format(codes_subquery))
glogger.debug(endpoint)
codes_json = requests.get(endpoint, params={'query': codes_subquery},
headers={'Accept': static.mimetypes['json'],
'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
for code in codes_json['results']['bindings']:
vcodes.append(list(code.values())[0]["value"])
else:
glogger.debug("No match between variable name and query.")
return vcodes | Returns a list of enumerated values for variable 'v' in query 'rq' | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L212-L251 | null | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
    """
    Guesses the endpoint URI from (in this order):
    - An endpoint parameter in URL
    - An #+endpoint decorator
    - A endpoint.txt file in the repo
    Otherwise assigns a default one

    Returns an (endpoint, auth) tuple; auth is a (user, password) pair for
    the default endpoint, or None when the endpoint came from the request,
    a decorator or the repo file.
    """
    auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
    if auth == ('none', 'none'):
        auth = None
    # 1. Explicit ?endpoint= parameter in the request URL wins
    if has_request_context() and "endpoint" in request.args:
        endpoint = request.args['endpoint']
        glogger.info("Endpoint provided in request: " + endpoint)
        return endpoint, auth
    # 2. '#+ endpoint:' decorator in the query file
    try:
        decorators = get_yaml_decorators(rq)
        endpoint = decorators['endpoint']
        auth = None
        glogger.info("Decorator guessed endpoint: " + endpoint)
    except (TypeError, KeyError):
        # 3. endpoint.txt file at the root of the repo
        try:
            endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
            endpoint = endpoint_content.strip().splitlines()[0]
            auth = None
            glogger.info("File guessed endpoint: " + endpoint)
        except Exception:
            # Narrowed from a bare 'except:' (the old TODO); any failure here
            # (missing or empty file, network error) falls back to the default.
            endpoint = static.DEFAULT_ENDPOINT
            auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
            if auth == ('none', 'none'):
                auth = None
            glogger.warning("No endpoint specified, using default ({})".format(endpoint))
    return endpoint, auth
def count_query_results(query, endpoint):
"""
Returns the total number of results that query 'query' will generate
WARNING: This is too expensive just for providing a number of result pages
Providing a dummy count for now
"""
# number_results_query, repl = re.subn("SELECT.*FROM", "SELECT COUNT (*) FROM", query)
# if not repl:
# number_results_query = re.sub("SELECT.*{", "SELECT COUNT(*) {", query)
# number_results_query = re.sub("GROUP\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("ORDER\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("LIMIT\s+[0-9]+", "", number_results_query)
# number_results_query = re.sub("OFFSET\s+[0-9]+", "", number_results_query)
#
# glogger.debug("Query for result count: " + number_results_query)
#
# # Preapre HTTP request
# headers = { 'Accept' : 'application/json' }
# data = { 'query' : number_results_query }
# count_json = requests.get(endpoint, params=data, headers=headers).json()
# count = int(count_json['results']['bindings'][0]['callret-0']['value'])
# glogger.info("Paginated query has {} results in total".format(count))
#
# return count
return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
"""
?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
?__name The parameter name is optional.
?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.
"""
# variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']
## Aggregates
internal_matcher = re.compile("__agg_\d+__")
## Basil-style variables
variable_matcher = re.compile(
"(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")
parameters = {}
for v in variables:
if internal_matcher.match(v):
continue
match = variable_matcher.match(v)
# TODO: currently only one parameter per triple pattern is supported
if match:
vname = match.group('name')
vrequired = True if match.group('required') == '_' else False
vtype = 'string'
# All these can be None
vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
vdefault = get_defaults(rq, vname, query_metadata)
vlang = None
vdatatype = None
vformat = None
mtype = match.group('type')
muserdefined = match.group('userdefined')
if mtype in ['number', 'literal', 'string']:
vtype = mtype
elif mtype in ['iri']: # TODO: proper form validation of input parameter uris
vtype = 'string'
vformat = 'iri'
elif mtype:
vtype = 'string'
if mtype in static.XSD_DATATYPES:
vdatatype = 'xsd:{}'.format(mtype)
elif len(mtype) == 2:
vlang = mtype
elif muserdefined:
vdatatype = '{}:{}'.format(mtype, muserdefined)
parameters[vname] = {
'original': '?{}'.format(v),
'required': vrequired,
'name': vname,
'type': vtype
}
# Possibly None parameter attributes
if vcodes is not None:
parameters[vname]['enum'] = sorted(vcodes)
if vlang is not None:
parameters[vname]['lang'] = vlang
if vdatatype is not None:
parameters[vname]['datatype'] = vdatatype
if vformat is not None:
parameters[vname]['format'] = vformat
if vdefault is not None:
parameters[vname]['default'] = vdefault
glogger.info('Finished parsing the following parameters: {}'.format(parameters))
return parameters
def get_defaults(rq, v, metadata):
"""
Returns the default value for a parameter or None
"""
glogger.debug("Metadata with defaults: {}".format(metadata))
if 'defaults' not in metadata:
return None
defaultsDict = _getDictWithKey(v, metadata['defaults'])
if defaultsDict:
return defaultsDict[v]
return None
def get_enumeration(rq, v, endpoint, metadata=None, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq', or
    None when the query metadata does not ask for enumeration of 'v'.

    Values come either from an explicit list in the '#+ enumerate:'
    decorator or, when only the variable name is listed, from a SPARQL
    query fired at 'endpoint'.
    """
    # Fix: default was a shared mutable dict (metadata={}); use a None
    # sentinel instead. Behavior for omitted metadata is unchanged.
    if metadata is None:
        metadata = {}
    # We only fire the enum filling queries if indicated by the query metadata
    if 'enumerate' not in metadata:
        return None
    enumDict = _getDictWithKey(v, metadata['enumerate'])
    if enumDict:
        # Enumeration values given inline in the decorator
        return enumDict[v]
    if v in metadata['enumerate']:
        # Only the variable name was listed: fetch the values from the endpoint
        return get_enumeration_sparql(rq, v, endpoint, auth)
    return None
def get_yaml_decorators(rq):
    """
    Returns the yaml decorator metadata only (this is needed by triple pattern fragments)

    'rq' is either a classic SPARQL query string (decorators live in '#+'
    comment lines) or a SPARQL-transformer JSON dict (decorators under the
    'grlc' key). Returns a dict with one entry per decorator plus 'query'
    (the query with decorator lines stripped), or None if 'rq' is empty.
    """
    if not rq:
        return None
    if isinstance(rq, dict) and 'grlc' in rq:  # json query (sparql transformer)
        yaml_string = rq['grlc']
        query_string = rq
    else:  # classic query: decorators are the '#+' comment lines
        yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
        query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])
    query_metadata = None
    if type(yaml_string) == dict:
        query_metadata = yaml_string
    elif type(yaml_string) == str:
        try:  # Invalid YAMLs will produce empty metadata
            # safe_load: decorators come from arbitrary (untrusted) repos, so
            # full yaml.load (which can construct arbitrary Python objects)
            # must not be used here; plain decorator maps parse identically.
            query_metadata = yaml.safe_load(yaml_string)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
            # Some decorator blocks are JSON rather than YAML; try that next
            try:
                query_metadata = json.loads(yaml_string)
            except json.JSONDecodeError:
                glogger.warning("Query decorators could not be parsed; check your YAML syntax")
    # If there is no YAML string
    if query_metadata is None:
        query_metadata = {}
    query_metadata['query'] = query_string
    return query_metadata
def enable_custom_function_prefix(rq, prefix):
    """
    Prepend a dummy 'PREFIX prefix: <:prefix>' declaration when query 'rq'
    uses the vendor-specific function namespace 'prefix' (e.g. 'bif', 'sql')
    without declaring it, so the query still parses. Returns 'rq' unchanged
    otherwise.
    """
    # Fix: the original condition 'A or B and not C' parsed as
    # 'A or (B and not C)' due to operator precedence, so a duplicate PREFIX
    # was prepended whenever ' prefix:' appeared in an already-declared query.
    # The intended condition is '(A or B) and not C'.
    uses_prefix = (' %s:' % prefix in rq) or ('(%s:' % prefix in rq)
    declares_prefix = 'PREFIX %s:' % prefix in rq
    if uses_prefix and not declares_prefix:
        rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
    return rq
def get_metadata(rq, endpoint):
"""
Returns the metadata 'exp' parsed from the raw query file 'rq'
'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'
"""
query_metadata = get_yaml_decorators(rq)
query_metadata['type'] = 'UNKNOWN'
query_metadata['original_query'] = rq
if isinstance(rq, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(rq)
rq = rq.strip()
query_metadata['proto'] = proto
query_metadata['opt'] = opt
query_metadata['query'] = rq
rq = enable_custom_function_prefix(rq, 'bif')
rq = enable_custom_function_prefix(rq, 'sql')
try:
# THE PARSING
# select, describe, construct, ask
parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
query_metadata['type'] = parsed_query.algebra.name
if query_metadata['type'] == 'SelectQuery':
# Projection variables
query_metadata['variables'] = parsed_query.algebra['PV']
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
elif query_metadata['type'] == 'ConstructQuery':
# Parameters
query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
else:
glogger.warning(
"Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
except ParseException as pe:
glogger.warning(pe)
glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")
# glogger.warning(traceback.print_exc())
# insert queries won't parse, so we regex
# glogger.info("Trying to parse INSERT query")
# if static.INSERT_PATTERN in rq:
# query_metadata['type'] = 'InsertQuery'
# query_metadata['parameters'] = [u'_g_iri']
try:
# update query
glogger.info("Trying to parse UPDATE query")
parsed_query = UpdateUnit.parseString(rq, parseAll=True)
glogger.info(parsed_query)
query_metadata['type'] = parsed_query[0]['request'][0].name
if query_metadata['type'] == 'InsertData':
query_metadata['parameters'] = {
'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
'required': True, 'type': 'iri'},
'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
'required': True, 'type': 'literal'}}
glogger.info("Update query parsed with {}".format(query_metadata['type']))
# if query_metadata['type'] == 'InsertData':
# query_metadata['variables'] = parsed_query.algebra['PV']
except:
glogger.error("Could not parse query")
glogger.error(query_metadata['query'])
glogger.error(traceback.print_exc())
pass
glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
glogger.debug("All parsed query metadata (from decorators and content): ")
glogger.debug(pformat(query_metadata, indent=32))
return query_metadata
def paginate_query(query, results_per_page, get_args):
page = get_args.get('page', 1)
glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))
# If contains LIMIT or OFFSET, remove them
glogger.debug("Original query: " + query)
no_limit_query = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
glogger.debug("No limit query: " + no_limit_query)
# Append LIMIT results_per_page OFFSET (page-1)*results_per_page
paginated_query = no_limit_query + " LIMIT {} OFFSET {}".format(results_per_page,
(int(page) - 1) * results_per_page)
glogger.debug("Paginated query: " + paginated_query)
return paginated_query
def rewrite_query(query, parameters, get_args):
glogger.debug("Query parameters")
glogger.debug(parameters)
requireXSD = False
required_params = {}
for k, v in parameters.items():
if parameters[k]['required']:
required_params[k] = v
requiredParams = set(required_params.keys())
providedParams = set(get_args.keys())
glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'
for pname, p in list(parameters.items()):
# Get the parameter value from the GET request
v = get_args.get(pname, None)
# If the parameter has a value
if not v:
continue
if isinstance(query, dict): # json query (sparql transformer)
if '$values' not in query:
query['$values'] = {}
values = query['$values']
if not p['original'] in values:
values[p['original']] = v
elif isinstance(values[p['original']], list):
values[p['original']].append(v)
else:
values[p['original']] = [values[p['original']], v]
continue
# IRI
if p['type'] == 'iri': # TODO: never reached anymore, since iris are now type=string with format=iri
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# A number (without a datatype)
elif p['type'] == 'number':
query = query.replace(p['original'], v)
# Literals
elif p['type'] == 'literal' or p['type'] == 'string':
# If it's a iri
if 'format' in p and p['format'] == 'iri':
query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
# If there is a language tag
if 'lang' in p and p['lang']:
query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
elif 'datatype' in p and p['datatype']:
query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
if 'xsd' in p['datatype']:
requireXSD = True
else:
query = query.replace(p['original'], "\"{}\"".format(v))
if isinstance(query, dict): # json query (sparql transformer)
rq, proto, opt = SPARQLTransformer.pre_process(query)
query = rq.strip()
if requireXSD and XSD_PREFIX not in query:
query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')
glogger.debug("Query rewritten as: " + query)
return query
|
CLARIAH/grlc | src/gquery.py | get_yaml_decorators | python | def get_yaml_decorators(rq):
# glogger.debug('Guessing decorators for query {}'.format(rq))
if not rq:
return None
if isinstance(rq, dict) and 'grlc' in rq: # json query (sparql transformer)
yaml_string = rq['grlc']
query_string = rq
else: # classic query
yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])
query_metadata = None
if type(yaml_string) == dict:
query_metadata = yaml_string
elif type(yaml_string) == str:
try: # Invalid YAMLs will produce empty metadata
query_metadata = yaml.load(yaml_string)
except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
try:
query_metadata = json.loads(yaml_string)
except json.JSONDecodeError:
glogger.warning("Query decorators could not be parsed; check your YAML syntax")
# If there is no YAML string
if query_metadata is None:
query_metadata = {}
query_metadata['query'] = query_string
# glogger.debug("Parsed query decorators: {}".format(query_metadata))
return query_metadata | Returns the yaml decorator metadata only (this is needed by triple pattern fragments) | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L254-L288 | null | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
    """
    Guesses the endpoint URI from (in this order):
    - An endpoint parameter in URL
    - An #+endpoint decorator
    - A endpoint.txt file in the repo
    Otherwise assigns a default one

    Returns an (endpoint, auth) tuple; auth is a (user, password) pair for
    the default endpoint, or None when the endpoint came from the request,
    a decorator or the repo file.
    """
    auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
    if auth == ('none', 'none'):
        auth = None
    # 1. Explicit ?endpoint= parameter in the request URL wins
    if has_request_context() and "endpoint" in request.args:
        endpoint = request.args['endpoint']
        glogger.info("Endpoint provided in request: " + endpoint)
        return endpoint, auth
    # 2. '#+ endpoint:' decorator in the query file
    try:
        decorators = get_yaml_decorators(rq)
        endpoint = decorators['endpoint']
        auth = None
        glogger.info("Decorator guessed endpoint: " + endpoint)
    except (TypeError, KeyError):
        # 3. endpoint.txt file at the root of the repo
        try:
            endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
            endpoint = endpoint_content.strip().splitlines()[0]
            auth = None
            glogger.info("File guessed endpoint: " + endpoint)
        except Exception:
            # Narrowed from a bare 'except:' (the old TODO); any failure here
            # (missing or empty file, network error) falls back to the default.
            endpoint = static.DEFAULT_ENDPOINT
            auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
            if auth == ('none', 'none'):
                auth = None
            glogger.warning("No endpoint specified, using default ({})".format(endpoint))
    return endpoint, auth
def count_query_results(query, endpoint):
"""
Returns the total number of results that query 'query' will generate
WARNING: This is too expensive just for providing a number of result pages
Providing a dummy count for now
"""
# number_results_query, repl = re.subn("SELECT.*FROM", "SELECT COUNT (*) FROM", query)
# if not repl:
# number_results_query = re.sub("SELECT.*{", "SELECT COUNT(*) {", query)
# number_results_query = re.sub("GROUP\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("ORDER\s+BY\s+[\?\_\(\)a-zA-Z0-9]+", "", number_results_query)
# number_results_query = re.sub("LIMIT\s+[0-9]+", "", number_results_query)
# number_results_query = re.sub("OFFSET\s+[0-9]+", "", number_results_query)
#
# glogger.debug("Query for result count: " + number_results_query)
#
# # Preapre HTTP request
# headers = { 'Accept' : 'application/json' }
# data = { 'query' : number_results_query }
# count_json = requests.get(endpoint, params=data, headers=headers).json()
# count = int(count_json['results']['bindings'][0]['callret-0']['value'])
# glogger.info("Paginated query has {} results in total".format(count))
#
# return count
return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
    """
    Extract grlc API parameters from BASIL-style query variables.

    Naming convention:
    ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
    ?__name The parameter name is optional.
    ?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
    ?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
    ?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
    ?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.

    Returns a dict keyed by parameter name; each value carries at least
    'original', 'required', 'name' and 'type', plus optional 'enum',
    'lang', 'datatype', 'format' and 'default' entries.
    """
    # variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']

    ## Aggregates (rdflib's internal __agg_N__ variables are not parameters)
    internal_matcher = re.compile("__agg_\d+__")
    ## Basil-style variables
    variable_matcher = re.compile(
        "(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")

    parameters = {}
    for v in variables:
        if internal_matcher.match(v):
            continue
        match = variable_matcher.match(v)
        # TODO: currently only one parameter per triple pattern is supported
        if match:
            vname = match.group('name')
            # One leading underscore -> mandatory, two -> optional
            vrequired = True if match.group('required') == '_' else False
            vtype = 'string'

            # All these can be None
            vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
            vdefault = get_defaults(rq, vname, query_metadata)
            vlang = None
            vdatatype = None
            vformat = None

            mtype = match.group('type')
            muserdefined = match.group('userdefined')

            if mtype in ['number', 'literal', 'string']:
                vtype = mtype
            elif mtype in ['iri']:  # TODO: proper form validation of input parameter uris
                vtype = 'string'
                vformat = 'iri'
            elif mtype:
                vtype = 'string'
                # Disambiguate the suffix: XSD datatype, 2-letter language
                # tag, or user-defined prefix:datatype pair (in that order).
                if mtype in static.XSD_DATATYPES:
                    vdatatype = 'xsd:{}'.format(mtype)
                elif len(mtype) == 2:
                    vlang = mtype
                elif muserdefined:
                    vdatatype = '{}:{}'.format(mtype, muserdefined)

            parameters[vname] = {
                'original': '?{}'.format(v),
                'required': vrequired,
                'name': vname,
                'type': vtype
            }

            # Possibly None parameter attributes
            if vcodes is not None:
                parameters[vname]['enum'] = sorted(vcodes)
            if vlang is not None:
                parameters[vname]['lang'] = vlang
            if vdatatype is not None:
                parameters[vname]['datatype'] = vdatatype
            if vformat is not None:
                parameters[vname]['format'] = vformat
            if vdefault is not None:
                parameters[vname]['default'] = vdefault

    glogger.info('Finished parsing the following parameters: {}'.format(parameters))
    return parameters
def get_defaults(rq, v, metadata):
    """Return the declared default value for parameter 'v', or None if absent."""
    glogger.debug("Metadata with defaults: {}".format(metadata))
    if 'defaults' not in metadata:
        return None
    match = _getDictWithKey(v, metadata['defaults'])
    return match[v] if match else None
def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq',
    or None when the query metadata does not enumerate 'v'.
    """
    # NOTE(review): mutable default `metadata={}` is harmless here (never
    # mutated), but `metadata=None` would be the safer convention.
    # glogger.debug("Metadata before processing enums: {}".format(metadata))
    # We only fire the enum filling queries if indicated by the query metadata
    if 'enumerate' not in metadata:
        return None
    # Inline enumeration: a decorator entry lists the allowed values directly.
    enumDict = _getDictWithKey(v, metadata['enumerate'])
    if enumDict:
        return enumDict[v]
    # Bare variable name: resolve candidate values by querying the endpoint.
    if v in metadata['enumerate']:
        return get_enumeration_sparql(rq, v, endpoint, auth)
    return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq'
    by firing a SELECT DISTINCT ?v subquery against the endpoint.
    Returns an empty list when the variable cannot be matched in the query.
    """
    glogger.info('Retrieving enumeration for variable {}'.format(v))
    vcodes = []

    # tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
    # tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)

    # Captures the optional FROM graph names and the body between the braces.
    # WHERE is optional too!!
    tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
                                  flags=re.DOTALL)

    glogger.debug(rq)
    tp_match = tpattern_matcher.match(rq)
    if tp_match:
        vtpattern = tp_match.group('tpattern')
        gnames = tp_match.group('gnames')
        glogger.debug("Detected graph names: {}".format(gnames))
        glogger.debug("Detected BGP: {}".format(vtpattern))
        glogger.debug("Matched triple pattern with parameter")
        # Rebuild the query as SELECT DISTINCT ?v over the same pattern,
        # keeping the FROM clause when one was present.
        if gnames:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        else:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        glogger.debug("Codes subquery: {}".format(codes_subquery))
        glogger.debug(endpoint)
        # NOTE(review): this sends static.ACCESS_TOKEN in the Authorization
        # header to the SPARQL endpoint -- that token looks like a GitHub
        # credential, which would be leaked to a third-party server here;
        # confirm whether this header is intentional.
        codes_json = requests.get(endpoint, params={'query': codes_subquery},
                                  headers={'Accept': static.mimetypes['json'],
                                           'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
        for code in codes_json['results']['bindings']:
            vcodes.append(list(code.values())[0]["value"])
    else:
        glogger.debug("No match between variable name and query.")

    return vcodes
def enable_custom_function_prefix(rq, prefix):
    """Prepend a dummy PREFIX declaration for a custom-function prefix.

    Virtuoso built-ins such as bif: / sql: are not declared in queries, which
    makes rdflib's parser reject them; declaring `PREFIX bif: <:bif>` lets
    the query parse. The declaration is only added when the prefix is used
    (` prefix:` or `(prefix:`) and not already declared.

    FIX: the original condition was `A or B and not C`; since `and` binds
    tighter than `or`, a query that already declared the prefix but used it
    with a leading space got a duplicate declaration. Parenthesized to the
    intended `(A or B) and not C`.
    """
    used = (' %s:' % prefix in rq) or ('(%s:' % prefix in rq)
    declared = 'PREFIX %s:' % prefix in rq
    if used and not declared:
        rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
    return rq
def get_metadata(rq, endpoint):
    """
    Returns the metadata 'exp' parsed from the raw query file 'rq'
    'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'

    The query is first parsed as SELECT / CONSTRUCT / DESCRIBE / ASK; on a
    ParseException it is retried as a SPARQL Update. Queries that cannot be
    parsed at all keep type 'UNKNOWN'.
    """
    query_metadata = get_yaml_decorators(rq)
    query_metadata['type'] = 'UNKNOWN'
    query_metadata['original_query'] = rq

    if isinstance(rq, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(rq)
        rq = rq.strip()
        query_metadata['proto'] = proto
        query_metadata['opt'] = opt
        query_metadata['query'] = rq

    # Declare Virtuoso built-in prefixes so rdflib's parser accepts them
    rq = enable_custom_function_prefix(rq, 'bif')
    rq = enable_custom_function_prefix(rq, 'sql')

    try:
        # THE PARSING
        # select, describe, construct, ask
        parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
        query_metadata['type'] = parsed_query.algebra.name
        if query_metadata['type'] == 'SelectQuery':
            # Projection variables
            query_metadata['variables'] = parsed_query.algebra['PV']
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        elif query_metadata['type'] == 'ConstructQuery':
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        else:
            glogger.warning(
                "Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
    except ParseException as pe:
        glogger.warning(pe)
        glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")

        try:
            # update query
            glogger.info("Trying to parse UPDATE query")
            parsed_query = UpdateUnit.parseString(rq, parseAll=True)
            glogger.info(parsed_query)
            query_metadata['type'] = parsed_query[0]['request'][0].name
            if query_metadata['type'] == 'InsertData':
                query_metadata['parameters'] = {
                    'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
                          'required': True, 'type': 'iri'},
                    'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
                             'required': True, 'type': 'literal'}}

            glogger.info("Update query parsed with {}".format(query_metadata['type']))
        except Exception:
            # FIX: was a bare `except:` (which also swallowed SystemExit and
            # KeyboardInterrupt) and logged traceback.print_exc(), whose
            # return value is None -- format_exc() yields the actual text.
            glogger.error("Could not parse query")
            glogger.error(query_metadata['query'])
            glogger.error(traceback.format_exc())

    glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
    glogger.debug("All parsed query metadata (from decorators and content): ")
    glogger.debug(pformat(query_metadata, indent=32))

    return query_metadata
def paginate_query(query, results_per_page, get_args):
    """Rewrite 'query' so it returns only the page requested in 'get_args'.

    Existing LIMIT/OFFSET clauses are stripped first, then pagination
    clauses are appended. Page numbers are 1-based and default to 1.
    """
    page = get_args.get('page', 1)
    glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))

    # If contains LIMIT or OFFSET, remove them
    glogger.debug("Original query: " + query)
    stripped = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
    glogger.debug("No limit query: " + stripped)

    # Append LIMIT results_per_page OFFSET (page-1)*results_per_page
    offset = (int(page) - 1) * results_per_page
    paginated = "{} LIMIT {} OFFSET {}".format(stripped, results_per_page, offset)
    glogger.debug("Paginated query: " + paginated)

    return paginated
def rewrite_query(query, parameters, get_args):
    """Substitute request argument values into the query's placeholders.

    'query' is either a SPARQL string or a SPARQL-Transformer JSON dict;
    'parameters' is the dict produced by get_parameters(); 'get_args' maps
    parameter names to request values. Raises AssertionError when a
    required parameter is missing from the request.
    """
    glogger.debug("Query parameters")
    glogger.debug(parameters)
    requireXSD = False

    required_params = {}
    for k, v in parameters.items():
        if parameters[k]['required']:
            required_params[k] = v
    requiredParams = set(required_params.keys())
    providedParams = set(get_args.keys())
    glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
    assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'

    for pname, p in list(parameters.items()):
        # Get the parameter value from the GET request
        v = get_args.get(pname, None)
        # If the parameter has a value
        # NOTE(review): truthiness check also skips falsy-but-present values
        # such as '' -- confirm that is intended.
        if not v:
            continue

        if isinstance(query, dict):  # json query (sparql transformer)
            # Accumulate values under $values; repeated parameters collect
            # into a list.
            if '$values' not in query:
                query['$values'] = {}
            values = query['$values']

            if not p['original'] in values:
                values[p['original']] = v
            elif isinstance(values[p['original']], list):
                values[p['original']].append(v)
            else:
                values[p['original']] = [values[p['original']], v]
            continue

        # IRI
        if p['type'] == 'iri':  # TODO: never reached anymore, since iris are now type=string with format=iri
            query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
        # A number (without a datatype)
        elif p['type'] == 'number':
            query = query.replace(p['original'], v)
        # Literals
        elif p['type'] == 'literal' or p['type'] == 'string':
            # If it's a iri
            if 'format' in p and p['format'] == 'iri':
                query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
            # If there is a language tag
            if 'lang' in p and p['lang']:
                query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
            elif 'datatype' in p and p['datatype']:
                query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
                # xsd-typed literals need the xsd prefix declared (below)
                if 'xsd' in p['datatype']:
                    requireXSD = True
            else:
                query = query.replace(p['original'], "\"{}\"".format(v))

    if isinstance(query, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(query)
        query = rq.strip()

    # Declare the xsd prefix when a substitution introduced an xsd datatype
    if requireXSD and XSD_PREFIX not in query:
        query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')

    glogger.debug("Query rewritten as: " + query)

    return query
|
def get_metadata(rq, endpoint):
    """
    Returns the metadata 'exp' parsed from the raw query file 'rq'
    'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate'

    The query is first parsed as SELECT / CONSTRUCT / DESCRIBE / ASK; on a
    ParseException it is retried as a SPARQL Update. Queries that cannot be
    parsed at all keep type 'UNKNOWN'.
    """
    query_metadata = get_yaml_decorators(rq)
    query_metadata['type'] = 'UNKNOWN'
    query_metadata['original_query'] = rq

    if isinstance(rq, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(rq)
        rq = rq.strip()
        query_metadata['proto'] = proto
        query_metadata['opt'] = opt
        query_metadata['query'] = rq

    # Declare Virtuoso built-in prefixes so rdflib's parser accepts them
    rq = enable_custom_function_prefix(rq, 'bif')
    rq = enable_custom_function_prefix(rq, 'sql')

    try:
        # THE PARSING
        # select, describe, construct, ask
        parsed_query = translateQuery(Query.parseString(rq, parseAll=True))
        query_metadata['type'] = parsed_query.algebra.name
        if query_metadata['type'] == 'SelectQuery':
            # Projection variables
            query_metadata['variables'] = parsed_query.algebra['PV']
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        elif query_metadata['type'] == 'ConstructQuery':
            # Parameters
            query_metadata['parameters'] = get_parameters(rq, parsed_query.algebra['_vars'], endpoint, query_metadata)
        else:
            glogger.warning(
                "Query type {} is currently unsupported and no metadata was parsed!".format(query_metadata['type']))
    except ParseException as pe:
        glogger.warning(pe)
        glogger.warning("Could not parse regular SELECT, CONSTRUCT, DESCRIBE or ASK query")

        try:
            # update query
            glogger.info("Trying to parse UPDATE query")
            parsed_query = UpdateUnit.parseString(rq, parseAll=True)
            glogger.info(parsed_query)
            query_metadata['type'] = parsed_query[0]['request'][0].name
            if query_metadata['type'] == 'InsertData':
                query_metadata['parameters'] = {
                    'g': {'datatype': None, 'enum': [], 'lang': None, 'name': 'g', 'original': '?_g_iri',
                          'required': True, 'type': 'iri'},
                    'data': {'datatype': None, 'enum': [], 'lang': None, 'name': 'data', 'original': '?_data',
                             'required': True, 'type': 'literal'}}

            glogger.info("Update query parsed with {}".format(query_metadata['type']))
        except Exception:
            # FIX: was a bare `except:` (which also swallowed SystemExit and
            # KeyboardInterrupt) and logged traceback.print_exc(), whose
            # return value is None -- format_exc() yields the actual text.
            glogger.error("Could not parse query")
            glogger.error(query_metadata['query'])
            glogger.error(traceback.format_exc())

    glogger.debug("Finished parsing query of type {}".format(query_metadata['type']))
    glogger.debug("All parsed query metadata (from decorators and content): ")
    glogger.debug(pformat(query_metadata, indent=32))

    return query_metadata
'exp' is one of: 'endpoint', 'tags', 'summary', 'request', 'pagination', 'enumerate' | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/gquery.py#L297-L369 | [
"def get_yaml_decorators(rq):\n \"\"\"\n Returns the yaml decorator metadata only (this is needed by triple pattern fragments)\n \"\"\"\n # glogger.debug('Guessing decorators for query {}'.format(rq))\n if not rq:\n return None\n\n if isinstance(rq, dict) and 'grlc' in rq: # json query (sp... | #!/usr/bin/env python
# gquery.py: functions that deal with / transform SPARQL queries in grlc
import yaml
import json
from rdflib.plugins.sparql.parser import Query, UpdateUnit
from rdflib.plugins.sparql.processor import translateQuery
from flask import request, has_request_context
from pyparsing import ParseException
import SPARQLTransformer
import logging
from pprint import pformat
import traceback
import re
import requests
# grlc modules
import grlc.static as static
glogger = logging.getLogger(__name__)
XSD_PREFIX = 'PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>'
def guess_endpoint_uri(rq, gh_repo):
    """
    Guesses the endpoint URI from (in this order):
    - An endpoint parameter in URL
    - An #+endpoint decorator
    - A endpoint.txt file in the repo
    Otherwise assigns a default one

    Returns a tuple (endpoint, auth) where auth is a (user, password) pair
    or None when the configured credentials are ('none', 'none').
    """
    auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
    if auth == ('none', 'none'):
        auth = None

    if has_request_context() and "endpoint" in request.args:
        endpoint = request.args['endpoint']
        glogger.info("Endpoint provided in request: " + endpoint)
        return endpoint, auth

    # Decorator
    try:
        decorators = get_yaml_decorators(rq)
        endpoint = decorators['endpoint']
        auth = None
        glogger.info("Decorator guessed endpoint: " + endpoint)
    except (TypeError, KeyError):
        # File
        try:
            endpoint_content = gh_repo.getTextFor({'download_url': 'endpoint.txt'})
            endpoint = endpoint_content.strip().splitlines()[0]
            auth = None
            glogger.info("File guessed endpoint: " + endpoint)
        except Exception:
            # FIX: was a bare `except:`; any failure to fetch or parse
            # endpoint.txt falls back to the configured default endpoint.
            endpoint = static.DEFAULT_ENDPOINT
            auth = (static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
            if auth == ('none', 'none'):
                auth = None
            glogger.warning("No endpoint specified, using default ({})".format(endpoint))

    return endpoint, auth
def count_query_results(query, endpoint):
    """Return the total number of results that 'query' would produce.

    Computing the real count (rewriting the query as SELECT COUNT(*)) is
    far too expensive just to derive a number of result pages, so a fixed
    dummy value is returned instead.
    """
    return 1000
def _getDictWithKey(key, dict_list):
""" Returns the first dictionary in dict_list which contains the given key"""
for d in dict_list:
if key in d:
return d
return None
def get_parameters(rq, variables, endpoint, query_metadata, auth=None):
    """
    Extract grlc API parameters from BASIL-style query variables.

    Naming convention:
    ?_name The variable specifies the API mandatory parameter name. The value is incorporated in the query as plain literal.
    ?__name The parameter name is optional.
    ?_name_iri The variable is substituted with the parameter value as a IRI (also: number or literal).
    ?_name_en The parameter value is considered as literal with the language 'en' (e.g., en,it,es, etc.).
    ?_name_integer The parameter value is considered as literal and the XSD datatype 'integer' is added during substitution.
    ?_name_prefix_datatype The parameter value is considered as literal and the datatype 'prefix:datatype' is added during substitution. The prefix must be specified according to the SPARQL syntax.

    Returns a dict keyed by parameter name; each value carries at least
    'original', 'required', 'name' and 'type', plus optional 'enum',
    'lang', 'datatype', 'format' and 'default' entries.
    """
    # variables = translateQuery(Query.parseString(rq, parseAll=True)).algebra['_vars']

    ## Aggregates (rdflib's internal __agg_N__ variables are not parameters)
    internal_matcher = re.compile("__agg_\d+__")
    ## Basil-style variables
    variable_matcher = re.compile(
        "(?P<required>[_]{1,2})(?P<name>[^_]+)_?(?P<type>[a-zA-Z0-9]+)?_?(?P<userdefined>[a-zA-Z0-9]+)?.*$")

    parameters = {}
    for v in variables:
        if internal_matcher.match(v):
            continue
        match = variable_matcher.match(v)
        # TODO: currently only one parameter per triple pattern is supported
        if match:
            vname = match.group('name')
            # One leading underscore -> mandatory, two -> optional
            vrequired = True if match.group('required') == '_' else False
            vtype = 'string'

            # All these can be None
            vcodes = get_enumeration(rq, vname, endpoint, query_metadata, auth)
            vdefault = get_defaults(rq, vname, query_metadata)
            vlang = None
            vdatatype = None
            vformat = None

            mtype = match.group('type')
            muserdefined = match.group('userdefined')

            if mtype in ['number', 'literal', 'string']:
                vtype = mtype
            elif mtype in ['iri']:  # TODO: proper form validation of input parameter uris
                vtype = 'string'
                vformat = 'iri'
            elif mtype:
                vtype = 'string'
                # Disambiguate the suffix: XSD datatype, 2-letter language
                # tag, or user-defined prefix:datatype pair (in that order).
                if mtype in static.XSD_DATATYPES:
                    vdatatype = 'xsd:{}'.format(mtype)
                elif len(mtype) == 2:
                    vlang = mtype
                elif muserdefined:
                    vdatatype = '{}:{}'.format(mtype, muserdefined)

            parameters[vname] = {
                'original': '?{}'.format(v),
                'required': vrequired,
                'name': vname,
                'type': vtype
            }

            # Possibly None parameter attributes
            if vcodes is not None:
                parameters[vname]['enum'] = sorted(vcodes)
            if vlang is not None:
                parameters[vname]['lang'] = vlang
            if vdatatype is not None:
                parameters[vname]['datatype'] = vdatatype
            if vformat is not None:
                parameters[vname]['format'] = vformat
            if vdefault is not None:
                parameters[vname]['default'] = vdefault

    glogger.info('Finished parsing the following parameters: {}'.format(parameters))
    return parameters
def get_defaults(rq, v, metadata):
    """Return the declared default value for parameter 'v', or None if absent."""
    glogger.debug("Metadata with defaults: {}".format(metadata))
    if 'defaults' not in metadata:
        return None
    match = _getDictWithKey(v, metadata['defaults'])
    return match[v] if match else None
def get_enumeration(rq, v, endpoint, metadata={}, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq',
    or None when the query metadata does not enumerate 'v'.
    """
    # NOTE(review): mutable default `metadata={}` is harmless here (never
    # mutated), but `metadata=None` would be the safer convention.
    # glogger.debug("Metadata before processing enums: {}".format(metadata))
    # We only fire the enum filling queries if indicated by the query metadata
    if 'enumerate' not in metadata:
        return None
    # Inline enumeration: a decorator entry lists the allowed values directly.
    enumDict = _getDictWithKey(v, metadata['enumerate'])
    if enumDict:
        return enumDict[v]
    # Bare variable name: resolve candidate values by querying the endpoint.
    if v in metadata['enumerate']:
        return get_enumeration_sparql(rq, v, endpoint, auth)
    return None
def get_enumeration_sparql(rq, v, endpoint, auth=None):
    """
    Returns a list of enumerated values for variable 'v' in query 'rq'
    by firing a SELECT DISTINCT ?v subquery against the endpoint.
    Returns an empty list when the variable cannot be matched in the query.
    """
    glogger.info('Retrieving enumeration for variable {}'.format(v))
    vcodes = []

    # tpattern_matcher = re.compile(".*(FROM\s+)?(?P<gnames>.*)\s+WHERE.*[\.\{][\n\t\s]*(?P<tpattern>.*\?" + re.escape(v) + ".*?\.).*", flags=re.DOTALL)
    # tpattern_matcher = re.compile(".*?((FROM\s*)(?P<gnames>(\<.*\>)+))?\s*WHERE\s*\{(?P<tpattern>.*)\}.*", flags=re.DOTALL)

    # Captures the optional FROM graph names and the body between the braces.
    # WHERE is optional too!!
    tpattern_matcher = re.compile(".*?(FROM\s*(?P<gnames>\<.*\>+))?\s*(WHERE\s*)?\{(?P<tpattern>.*)\}.*",
                                  flags=re.DOTALL)

    glogger.debug(rq)
    tp_match = tpattern_matcher.match(rq)
    if tp_match:
        vtpattern = tp_match.group('tpattern')
        gnames = tp_match.group('gnames')
        glogger.debug("Detected graph names: {}".format(gnames))
        glogger.debug("Detected BGP: {}".format(vtpattern))
        glogger.debug("Matched triple pattern with parameter")
        # Rebuild the query as SELECT DISTINCT ?v over the same pattern,
        # keeping the FROM clause when one was present.
        if gnames:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " FROM " + gnames + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        else:
            codes_subquery = re.sub("SELECT.*\{.*\}.*",
                                    "SELECT DISTINCT ?" + v + " WHERE { " + vtpattern + " }", rq,
                                    flags=re.DOTALL)
        glogger.debug("Codes subquery: {}".format(codes_subquery))
        glogger.debug(endpoint)
        # NOTE(review): this sends static.ACCESS_TOKEN in the Authorization
        # header to the SPARQL endpoint -- that token looks like a GitHub
        # credential, which would be leaked to a third-party server here;
        # confirm whether this header is intentional.
        codes_json = requests.get(endpoint, params={'query': codes_subquery},
                                  headers={'Accept': static.mimetypes['json'],
                                           'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}, auth=auth).json()
        for code in codes_json['results']['bindings']:
            vcodes.append(list(code.values())[0]["value"])
    else:
        glogger.debug("No match between variable name and query.")

    return vcodes
def get_yaml_decorators(rq):
    """
    Returns the yaml decorator metadata only (this is needed by triple pattern fragments)

    Decorators are the '#+' comment lines of a query file (or the 'grlc'
    key of a JSON query), parsed as YAML with a JSON fallback. Invalid
    metadata yields an empty dict. The returned dict always carries the
    decorator-stripped query text under the 'query' key. Returns None when
    'rq' is empty.
    """
    # glogger.debug('Guessing decorators for query {}'.format(rq))
    if not rq:
        return None

    if isinstance(rq, dict) and 'grlc' in rq:  # json query (sparql transformer)
        yaml_string = rq['grlc']
        query_string = rq
    else:  # classic query
        yaml_string = "\n".join([row.lstrip('#+') for row in rq.split('\n') if row.startswith('#+')])
        query_string = "\n".join([row for row in rq.split('\n') if not row.startswith('#+')])

    query_metadata = None
    if type(yaml_string) == dict:
        query_metadata = yaml_string
    elif type(yaml_string) == str:
        try:  # Invalid YAMLs will produce empty metadata
            # FIX: yaml.load without an explicit Loader can construct
            # arbitrary Python objects; decorators come from third-party
            # repositories, so parse them with the safe loader.
            query_metadata = yaml.safe_load(yaml_string)
        except (yaml.parser.ParserError, yaml.scanner.ScannerError):
            try:
                query_metadata = json.loads(yaml_string)
            except json.JSONDecodeError:
                glogger.warning("Query decorators could not be parsed; check your YAML syntax")

    # If there is no YAML string
    if query_metadata is None:
        query_metadata = {}
    query_metadata['query'] = query_string

    # glogger.debug("Parsed query decorators: {}".format(query_metadata))

    return query_metadata
def enable_custom_function_prefix(rq, prefix):
    """Prepend a dummy PREFIX declaration for a custom-function prefix.

    Virtuoso built-ins such as bif: / sql: are not declared in queries, which
    makes rdflib's parser reject them; declaring `PREFIX bif: <:bif>` lets
    the query parse. The declaration is only added when the prefix is used
    (` prefix:` or `(prefix:`) and not already declared.

    FIX: the original condition was `A or B and not C`; since `and` binds
    tighter than `or`, a query that already declared the prefix but used it
    with a leading space got a duplicate declaration. Parenthesized to the
    intended `(A or B) and not C`.
    """
    used = (' %s:' % prefix in rq) or ('(%s:' % prefix in rq)
    declared = 'PREFIX %s:' % prefix in rq
    if used and not declared:
        rq = 'PREFIX %s: <:%s>\n' % (prefix, prefix) + rq
    return rq
def paginate_query(query, results_per_page, get_args):
    """Rewrite 'query' so it returns only the page requested in 'get_args'.

    Existing LIMIT/OFFSET clauses are stripped first, then pagination
    clauses are appended. Page numbers are 1-based and default to 1.
    """
    page = get_args.get('page', 1)
    glogger.info("Paginating query for page {}, {} results per page".format(page, results_per_page))

    # If contains LIMIT or OFFSET, remove them
    glogger.debug("Original query: " + query)
    stripped = re.sub("((LIMIT|OFFSET)\s+[0-9]+)*", "", query)
    glogger.debug("No limit query: " + stripped)

    # Append LIMIT results_per_page OFFSET (page-1)*results_per_page
    offset = (int(page) - 1) * results_per_page
    paginated = "{} LIMIT {} OFFSET {}".format(stripped, results_per_page, offset)
    glogger.debug("Paginated query: " + paginated)

    return paginated
def rewrite_query(query, parameters, get_args):
    """Substitute request argument values into the query's placeholders.

    'query' is either a SPARQL string or a SPARQL-Transformer JSON dict;
    'parameters' is the dict produced by get_parameters(); 'get_args' maps
    parameter names to request values. Raises AssertionError when a
    required parameter is missing from the request.
    """
    glogger.debug("Query parameters")
    glogger.debug(parameters)
    requireXSD = False

    required_params = {}
    for k, v in parameters.items():
        if parameters[k]['required']:
            required_params[k] = v
    requiredParams = set(required_params.keys())
    providedParams = set(get_args.keys())
    glogger.debug("Required parameters: {} Request args: {}".format(requiredParams, providedParams))
    assert requiredParams.issubset(providedParams), 'Provided parameters do not cover the required parameters!'

    for pname, p in list(parameters.items()):
        # Get the parameter value from the GET request
        v = get_args.get(pname, None)
        # If the parameter has a value
        # NOTE(review): truthiness check also skips falsy-but-present values
        # such as '' -- confirm that is intended.
        if not v:
            continue

        if isinstance(query, dict):  # json query (sparql transformer)
            # Accumulate values under $values; repeated parameters collect
            # into a list.
            if '$values' not in query:
                query['$values'] = {}
            values = query['$values']

            if not p['original'] in values:
                values[p['original']] = v
            elif isinstance(values[p['original']], list):
                values[p['original']].append(v)
            else:
                values[p['original']] = [values[p['original']], v]
            continue

        # IRI
        if p['type'] == 'iri':  # TODO: never reached anymore, since iris are now type=string with format=iri
            query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
        # A number (without a datatype)
        elif p['type'] == 'number':
            query = query.replace(p['original'], v)
        # Literals
        elif p['type'] == 'literal' or p['type'] == 'string':
            # If it's a iri
            if 'format' in p and p['format'] == 'iri':
                query = query.replace(p['original'], "{}{}{}".format('<', v, '>'))
            # If there is a language tag
            if 'lang' in p and p['lang']:
                query = query.replace(p['original'], "\"{}\"@{}".format(v, p['lang']))
            elif 'datatype' in p and p['datatype']:
                query = query.replace(p['original'], "\"{}\"^^{}".format(v, p['datatype']))
                # xsd-typed literals need the xsd prefix declared (below)
                if 'xsd' in p['datatype']:
                    requireXSD = True
            else:
                query = query.replace(p['original'], "\"{}\"".format(v))

    if isinstance(query, dict):  # json query (sparql transformer)
        rq, proto, opt = SPARQLTransformer.pre_process(query)
        query = rq.strip()

    # Declare the xsd prefix when a substitution introduced an xsd datatype
    if requireXSD and XSD_PREFIX not in query:
        query = query.replace('SELECT', XSD_PREFIX + '\n\nSELECT')

    glogger.debug("Query rewritten as: " + query)

    return query
|
CLARIAH/grlc | src/fileLoaders.py | BaseLoader.getProjectionForQueryName | python | def getProjectionForQueryName(self, query_name):
projectionFileName = query_name + '.pyql'
projectionText = self._getText(projectionFileName)
return projectionText | TODO: DOCUMENT !!
Returns None if no such projection exists | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/fileLoaders.py#L35-L41 | null | class BaseLoader:
def getTextForName(self, query_name):
    """Return (text, type) for the first query file matching 'query_name'.

    Tries, in order: .rq / .sparql (SPARQL), .tpf (TPF) and .json (JSON,
    parsed into a dict). Returns ('', None) when no candidate file exists.
    """
    # The URIs of all candidates
    rq_name = query_name + '.rq'
    sparql_name = query_name + '.sparql'
    tpf_name = query_name + '.tpf'
    json_name = query_name + '.json'
    candidates = [
        (rq_name, qType['SPARQL']),
        (sparql_name, qType['SPARQL']),
        (tpf_name, qType['TPF']),
        (json_name, qType['JSON'])
    ]

    for queryFullName, queryType in candidates:
        queryText = self._getText(queryFullName)
        if queryText:
            # JSON queries are returned as a parsed dict, not raw text
            if (queryType == qType['JSON']):
                queryText = json.loads(queryText)
            return queryText, queryType

    # No query found...
    return '', None
def getLicenceURL(self):
    """Return the download URL of the repo's LICENSE/LICENCE file, or None."""
    for f in self.fetchFiles():
        # Case-insensitive match of either spelling
        if f['name'].lower() == 'license' or f['name'].lower() == 'licence':
            return f['download_url']
    return None
|
CLARIAH/grlc | src/fileLoaders.py | LocalLoader.fetchFiles | python | def fetchFiles(self):
print("Fetching files from {}".format(self.baseDir))
files = glob(path.join(self.baseDir, '*'))
filesDef = []
for f in files:
print("Found SPARQL file {}".format(f))
relative = f.replace(self.baseDir, '')
filesDef.append({
'download_url': relative,
'name': relative
})
return filesDef | Returns a list of file items contained on the local repo. | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/fileLoaders.py#L125-L137 | null | class LocalLoader(BaseLoader):
def __init__(self, baseDir=static.LOCAL_SPARQL_DIR):
    # Root directory from which local query files are served.
    self.baseDir = baseDir
def getRawRepoUri(self):
    """Returns the root url of the local repo."""
    # The local file system has no remote URI prefix.
    return ''
def getTextFor(self, fileItem):
    """Returns the contents of the given file item on the local repo."""
    # 'download_url' holds the path relative to baseDir (see fetchFiles)
    return self._getText(fileItem['download_url'])
def _getText(self, filename):
    """Return the contents of 'filename' under baseDir, or None if it does not exist."""
    targetFile = self.baseDir + filename
    if path.exists(targetFile):
        with open(targetFile, 'r') as f:
            lines = f.readlines()
            text = ''.join(lines)
            return text
    else:
        return None
def getRepoTitle(self):
    # Fixed title for the local file-system "repository".
    return 'local'
def getContactName(self):
    # No contact metadata for local queries.
    return ''
def getContactUrl(self):
    # No contact metadata for local queries.
    return ''
def getCommitList(self):
    # Single pseudo-commit; local files carry no version history.
    return ['local']
def getFullName(self):
    # user/repo pair used when building API paths.
    return 'local/local'
def getRepoURI(self):
    # Placeholder URI recorded in provenance for local deployments.
    return 'local-file-system'
|
def get_repo_info(loader, sha, prov_g):
    """Generate swagger information from the repo being used.

    Returns a tuple (prev_commit, next_commit, info, basePath) where the
    commit fields point at the neighbouring commits of the requested one
    (None at either end of the history) and 'info' is the swagger info
    section.
    """
    full_name = loader.getFullName()
    title = loader.getRepoTitle()
    contact_name = loader.getContactName()
    contact_url = loader.getContactUrl()
    commits = loader.getCommitList()
    licence = loader.getLicenceURL()

    # Record the repository as a used entity in the provenance graph, if any
    if prov_g:
        prov_g.add_used_entity(loader.getRepoURI())

    # The requested commit, defaulting to the most recent one
    version = sha if sha else commits[0]
    idx = commits.index(version)
    prev_commit = commits[idx + 1] if idx < len(commits) - 1 else None
    next_commit = commits[idx - 1] if idx > 0 else None

    info = {
        'version': version,
        'title': title,
        'contact': {
            'name': contact_name,
            'url': contact_url
        },
        'license': {
            'name': 'License',
            'url': licence
        }
    }

    basePath = '/api/' + full_name + '/'
    basePath += ('commit/' + sha + '/') if sha else ''

    return prev_commit, next_commit, info, basePath
import grlc.utils
import grlc.gquery as gquery
import grlc.pagination as pageUtils
import traceback
import logging
glogger = logging.getLogger(__name__)
def get_blank_spec():
    """Creates the base (blank) structure of swagger specification."""
    return {
        'swagger': '2.0',
        # 'http' or 'https' -- left blank so it follows how the UI is loaded
        'schemes': [],
        'paths': {},
        'definitions': {
            'Message': {'type': 'string'}
        }
    }
def get_path_for_item(item):
    """Build the swagger path object for one parsed query item.

    The item's original query text (or pretty-printed JSON) is embedded as
    fenced code in the operation description, together with any SPARQL
    projection.
    """
    query = item['original_query']
    if isinstance(query, dict):
        # NOTE(review): this mutates the caller's dict by dropping 'grlc'.
        del query['grlc']
        query = "\n" + json.dumps(query, indent=2) + "\n"

    description = item['description']
    description += '\n\n```{}```'.format(query)
    description += '\n\nSPARQL projection:\n```pythonql\n{}```'.format(
        item['projection']) if 'projection' in item else ''

    item_path = {
        item['method']: {
            'tags': item['tags'],
            'summary': item['summary'],
            'description': description,
            'produces': ['text/csv', 'application/json', 'text/html'],
            'parameters': item['params'] if 'params' in item else None,
            'responses': {
                '200': {
                    'description': 'Query response',
                    'schema': {
                        'type': 'array',
                        'items': {
                            'type': 'object',
                            'properties': item['item_properties'] if 'item_properties' in item else None
                        },
                    }
                },
                'default': {
                    'description': 'Unexpected error',
                    'schema': {
                        '$ref': '#/definitions/Message'
                    }
                }
            }
        }
    }
    if 'projection' in item:
        item_path['projection'] = item['projection']
    return item_path
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]):
"""Build grlc specification for the given github user / repo."""
loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov)
files = loader.fetchFiles()
raw_repo_uri = loader.getRawRepoUri()
# Fetch all .rq files
items = []
allowed_ext = ["rq", "sparql", "json", "tpf"]
for c in files:
glogger.debug('>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'.format(c['name']))
extension = c['name'].split('.')[-1]
if extension in allowed_ext:
call_name = c['name'].split('.')[0]
# Retrieve extra metadata from the query decorators
query_text = loader.getTextFor(c)
item = None
if extension == "json":
query_text = json.loads(query_text)
if extension in ["rq", "sparql", "json"]:
glogger.debug("===================================================================")
glogger.debug("Processing SPARQL query: {}".format(c['name']))
glogger.debug("===================================================================")
item = process_sparql_query_text(query_text, loader, call_name, extraMetadata)
elif "tpf" == extension:
glogger.debug("===================================================================")
glogger.debug("Processing TPF query: {}".format(c['name']))
glogger.debug("===================================================================")
item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata)
else:
glogger.info("Ignoring unsupported source call name: {}".format(c['name']))
if item:
items.append(item)
return items
def process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata):
query_metadata = gquery.get_yaml_decorators(query_text)
tags = query_metadata['tags'] if 'tags' in query_metadata else []
glogger.debug("Read query tags: " + ', '.join(tags))
summary = query_metadata['summary'] if 'summary' in query_metadata else ""
glogger.debug("Read query summary: " + summary)
description = query_metadata['description'] if 'description' in query_metadata else ""
glogger.debug("Read query description: " + description)
method = query_metadata['method'].lower() if 'method' in query_metadata else "get"
if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:
method = "get"
pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
glogger.debug("Read query pagination: " + str(pagination))
endpoint = query_metadata['endpoint'] if 'endpoint' in query_metadata else ""
glogger.debug("Read query endpoint: " + endpoint)
# If this query allows pagination, add page number as parameter
params = []
if pagination:
params.append(pageUtils.getSwaggerPaginationDef(pagination))
item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)
return item
def process_sparql_query_text(query_text, loader, call_name, extraMetadata):
# We get the endpoint name first, since some query metadata fields (eg enums) require it
endpoint, auth = gquery.guess_endpoint_uri(query_text, loader)
glogger.debug("Read query endpoint: {}".format(endpoint))
try:
query_metadata = gquery.get_metadata(query_text, endpoint)
except Exception:
raw_repo_uri = loader.getRawRepoUri()
raw_query_uri = raw_repo_uri + ' / ' + call_name
glogger.error("Could not parse query at {}".format(raw_query_uri))
glogger.error(traceback.print_exc())
return None
tags = query_metadata['tags'] if 'tags' in query_metadata else []
summary = query_metadata['summary'] if 'summary' in query_metadata else ""
description = query_metadata['description'] if 'description' in query_metadata else ""
method = query_metadata['method'].lower() if 'method' in query_metadata else ""
if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:
method = ""
pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
endpoint_in_url = query_metadata['endpoint_in_url'] if 'endpoint_in_url' in query_metadata else True
projection = loader.getProjectionForQueryName(call_name)
# Processing of the parameters
params = []
# PV properties
item_properties = {}
# If this query allows pagination, add page number as parameter
if pagination:
params.append(pageUtils.getSwaggerPaginationDef(pagination))
if query_metadata['type'] in ['SelectQuery', 'ConstructQuery', 'InsertData']:
# TODO: do something intelligent with the parameters!
# As per #3, prefetching IRIs via SPARQL and filling enum
parameters = query_metadata['parameters']
for v, p in list(parameters.items()):
param = {}
param['name'] = p['name']
param['type'] = p['type']
param['required'] = p['required']
param['in'] = "query"
param['description'] = "A value of type {} that will substitute {} in the original query".format(p['type'],
p[
'original'])
if 'lang' in p:
param['description'] = "A value of type {}@{} that will substitute {} in the original query".format(
p['type'], p['lang'], p['original'])
if 'format' in p:
param['format'] = p['format']
param['description'] = "A value of type {} ({}) that will substitute {} in the original query".format(
p['type'], p['format'], p['original'])
if 'enum' in p:
param['enum'] = p['enum']
if 'default' in p:
param['default'] = p['default']
params.append(param)
if endpoint_in_url:
endpoint_param = {}
endpoint_param['name'] = "endpoint"
endpoint_param['type'] = "string"
endpoint_param['in'] = "query"
endpoint_param['description'] = "Alternative endpoint for SPARQL query"
endpoint_param['default'] = endpoint
params.append(endpoint_param)
if query_metadata['type'] == 'SelectQuery':
# Fill in the spec for SELECT
if not method:
method = 'get'
for pv in query_metadata['variables']:
item_properties[pv] = {
"name": pv,
"type": "object",
"required": ["type", "value"],
"properties": {
"type": {
"type": "string"
},
"value": {
"type": "string"
},
"xml:lang": {
"type": "string"
},
"datatype": {
"type": "string"
}
}
}
elif query_metadata['type'] == 'ConstructQuery':
if not method:
method = 'get'
elif query_metadata['type'] == 'UNKNOWN':
glogger.warning("grlc could not parse this query; assuming a plain, non-parametric SELECT in the API spec")
if not method:
method = 'get'
else:
# TODO: process all other kinds of queries
glogger.warning("Query of type {} is currently unsupported! Skipping".format(query_metadata['type']))
# Finally: main structure of the callname spec
item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata,
projection)
return item
def packItem(call_name, method, tags, summary, description, params, query_metadata, extraMetadata, projection=None):
item = {
'call_name': call_name,
'method': method,
'tags': tags,
'summary': summary,
'description': description,
'params': params,
'item_properties': None, # From projection variables, only SelectQuery
'query': query_metadata['query'],
'original_query': query_metadata.get('original_query', query_metadata['query'])
}
if projection:
item['projection'] = projection # SPARQL projection PyQL file is available
for extraField in extraMetadata:
if extraField in query_metadata:
item[extraField] = query_metadata[extraField]
return item
|
CLARIAH/grlc | src/swagger.py | build_spec | python | def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]):
loader = grlc.utils.getLoader(user, repo, sha=sha, prov=prov)
files = loader.fetchFiles()
raw_repo_uri = loader.getRawRepoUri()
# Fetch all .rq files
items = []
allowed_ext = ["rq", "sparql", "json", "tpf"]
for c in files:
glogger.debug('>>>>>>>>>>>>>>>>>>>>>>>>>c_name: {}'.format(c['name']))
extension = c['name'].split('.')[-1]
if extension in allowed_ext:
call_name = c['name'].split('.')[0]
# Retrieve extra metadata from the query decorators
query_text = loader.getTextFor(c)
item = None
if extension == "json":
query_text = json.loads(query_text)
if extension in ["rq", "sparql", "json"]:
glogger.debug("===================================================================")
glogger.debug("Processing SPARQL query: {}".format(c['name']))
glogger.debug("===================================================================")
item = process_sparql_query_text(query_text, loader, call_name, extraMetadata)
elif "tpf" == extension:
glogger.debug("===================================================================")
glogger.debug("Processing TPF query: {}".format(c['name']))
glogger.debug("===================================================================")
item = process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata)
else:
glogger.info("Ignoring unsupported source call name: {}".format(c['name']))
if item:
items.append(item)
return items | Build grlc specification for the given github user / repo. | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/swagger.py#L107-L146 | [
"def process_sparql_query_text(query_text, loader, call_name, extraMetadata):\n # We get the endpoint name first, since some query metadata fields (eg enums) require it\n\n endpoint, auth = gquery.guess_endpoint_uri(query_text, loader)\n glogger.debug(\"Read query endpoint: {}\".format(endpoint))\n\n tr... | import json
import grlc.utils
import grlc.gquery as gquery
import grlc.pagination as pageUtils
import traceback
import logging
glogger = logging.getLogger(__name__)
def get_blank_spec():
"""Creates the base (blank) structure of swagger specification."""
swag = {}
swag['swagger'] = '2.0'
swag['schemes'] = [] # 'http' or 'https' -- leave blank to make it dependent on how UI is loaded
swag['paths'] = {}
swag['definitions'] = {
'Message': {'type': 'string'}
}
return swag
def get_repo_info(loader, sha, prov_g):
"""Generate swagger information from the repo being used."""
user_repo = loader.getFullName()
repo_title = loader.getRepoTitle()
contact_name = loader.getContactName()
contact_url = loader.getContactUrl()
commit_list = loader.getCommitList()
licence_url = loader.getLicenceURL()
# Add the API URI as a used entity by the activity
if prov_g:
prov_g.add_used_entity(loader.getRepoURI())
prev_commit = None
next_commit = None
version = sha if sha else commit_list[0]
if commit_list.index(version) < len(commit_list) - 1:
prev_commit = commit_list[commit_list.index(version) + 1]
if commit_list.index(version) > 0:
next_commit = commit_list[commit_list.index(version) - 1]
info = {
'version': version,
'title': repo_title,
'contact': {
'name': contact_name,
'url': contact_url
},
'license': {
'name': 'License',
'url': licence_url
}
}
basePath = '/api/' + user_repo + '/'
basePath += ('commit/' + sha + '/') if sha else ''
return prev_commit, next_commit, info, basePath
def get_path_for_item(item):
query = item['original_query']
if isinstance(query, dict):
del query['grlc']
query = "\n" + json.dumps(query, indent=2) + "\n"
description = item['description']
description += '\n\n```{}```'.format(query)
description += '\n\nSPARQL projection:\n```pythonql\n{}```'.format(
item['projection']) if 'projection' in item else ''
item_path = {
item['method']: {
'tags': item['tags'],
'summary': item['summary'],
'description': description,
'produces': ['text/csv', 'application/json', 'text/html'],
'parameters': item['params'] if 'params' in item else None,
'responses': {
'200': {
'description': 'Query response',
'schema': {
'type': 'array',
'items': {
'type': 'object',
'properties': item['item_properties'] if 'item_properties' in item else None
},
}
},
'default': {
'description': 'Unexpected error',
'schema': {
'$ref': '#/definitions/Message'
}
}
}
}
}
if 'projection' in item:
item_path['projection'] = item['projection']
return item_path
def process_tpf_query_text(query_text, raw_repo_uri, call_name, extraMetadata):
query_metadata = gquery.get_yaml_decorators(query_text)
tags = query_metadata['tags'] if 'tags' in query_metadata else []
glogger.debug("Read query tags: " + ', '.join(tags))
summary = query_metadata['summary'] if 'summary' in query_metadata else ""
glogger.debug("Read query summary: " + summary)
description = query_metadata['description'] if 'description' in query_metadata else ""
glogger.debug("Read query description: " + description)
method = query_metadata['method'].lower() if 'method' in query_metadata else "get"
if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:
method = "get"
pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
glogger.debug("Read query pagination: " + str(pagination))
endpoint = query_metadata['endpoint'] if 'endpoint' in query_metadata else ""
glogger.debug("Read query endpoint: " + endpoint)
# If this query allows pagination, add page number as parameter
params = []
if pagination:
params.append(pageUtils.getSwaggerPaginationDef(pagination))
item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata)
return item
def process_sparql_query_text(query_text, loader, call_name, extraMetadata):
# We get the endpoint name first, since some query metadata fields (eg enums) require it
endpoint, auth = gquery.guess_endpoint_uri(query_text, loader)
glogger.debug("Read query endpoint: {}".format(endpoint))
try:
query_metadata = gquery.get_metadata(query_text, endpoint)
except Exception:
raw_repo_uri = loader.getRawRepoUri()
raw_query_uri = raw_repo_uri + ' / ' + call_name
glogger.error("Could not parse query at {}".format(raw_query_uri))
glogger.error(traceback.print_exc())
return None
tags = query_metadata['tags'] if 'tags' in query_metadata else []
summary = query_metadata['summary'] if 'summary' in query_metadata else ""
description = query_metadata['description'] if 'description' in query_metadata else ""
method = query_metadata['method'].lower() if 'method' in query_metadata else ""
if method not in ['get', 'post', 'head', 'put', 'delete', 'options', 'connect']:
method = ""
pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
endpoint_in_url = query_metadata['endpoint_in_url'] if 'endpoint_in_url' in query_metadata else True
projection = loader.getProjectionForQueryName(call_name)
# Processing of the parameters
params = []
# PV properties
item_properties = {}
# If this query allows pagination, add page number as parameter
if pagination:
params.append(pageUtils.getSwaggerPaginationDef(pagination))
if query_metadata['type'] in ['SelectQuery', 'ConstructQuery', 'InsertData']:
# TODO: do something intelligent with the parameters!
# As per #3, prefetching IRIs via SPARQL and filling enum
parameters = query_metadata['parameters']
for v, p in list(parameters.items()):
param = {}
param['name'] = p['name']
param['type'] = p['type']
param['required'] = p['required']
param['in'] = "query"
param['description'] = "A value of type {} that will substitute {} in the original query".format(p['type'],
p[
'original'])
if 'lang' in p:
param['description'] = "A value of type {}@{} that will substitute {} in the original query".format(
p['type'], p['lang'], p['original'])
if 'format' in p:
param['format'] = p['format']
param['description'] = "A value of type {} ({}) that will substitute {} in the original query".format(
p['type'], p['format'], p['original'])
if 'enum' in p:
param['enum'] = p['enum']
if 'default' in p:
param['default'] = p['default']
params.append(param)
if endpoint_in_url:
endpoint_param = {}
endpoint_param['name'] = "endpoint"
endpoint_param['type'] = "string"
endpoint_param['in'] = "query"
endpoint_param['description'] = "Alternative endpoint for SPARQL query"
endpoint_param['default'] = endpoint
params.append(endpoint_param)
if query_metadata['type'] == 'SelectQuery':
# Fill in the spec for SELECT
if not method:
method = 'get'
for pv in query_metadata['variables']:
item_properties[pv] = {
"name": pv,
"type": "object",
"required": ["type", "value"],
"properties": {
"type": {
"type": "string"
},
"value": {
"type": "string"
},
"xml:lang": {
"type": "string"
},
"datatype": {
"type": "string"
}
}
}
elif query_metadata['type'] == 'ConstructQuery':
if not method:
method = 'get'
elif query_metadata['type'] == 'UNKNOWN':
glogger.warning("grlc could not parse this query; assuming a plain, non-parametric SELECT in the API spec")
if not method:
method = 'get'
else:
# TODO: process all other kinds of queries
glogger.warning("Query of type {} is currently unsupported! Skipping".format(query_metadata['type']))
# Finally: main structure of the callname spec
item = packItem('/' + call_name, method, tags, summary, description, params, query_metadata, extraMetadata,
projection)
return item
def packItem(call_name, method, tags, summary, description, params, query_metadata, extraMetadata, projection=None):
item = {
'call_name': call_name,
'method': method,
'tags': tags,
'summary': summary,
'description': description,
'params': params,
'item_properties': None, # From projection variables, only SelectQuery
'query': query_metadata['query'],
'original_query': query_metadata.get('original_query', query_metadata['query'])
}
if projection:
item['projection'] = projection # SPARQL projection PyQL file is available
for extraField in extraMetadata:
if extraField in query_metadata:
item[extraField] = query_metadata[extraField]
return item
|
CLARIAH/grlc | src/sparql.py | getResponseText | python | def getResponseText(endpoint, query, requestedMimeType):
'''
endpoint - URL of sparql endpoint
query - SPARQL query to be executed
requestedMimeType Type of content requested. can be:
'text/csv; q=1.0, */*; q=0.1'
'application/json'
etc.
Returns result + mimetype
'''
retFormat = _mimeTypeToSparqlFormat(requestedMimeType)
client = SPARQLWrapper(endpoint)
client.setQuery(query)
client.setReturnFormat(retFormat)
client.setCredentials(static.DEFAULT_ENDPOINT_USER, static.DEFAULT_ENDPOINT_PASSWORD)
result = client.queryAndConvert()
if retFormat==JSON:
result = jsonify(result)
return result, MIME_FORMAT[retFormat] | endpoint - URL of sparql endpoint
query - SPARQL query to be executed
requestedMimeType Type of content requested. can be:
'text/csv; q=1.0, */*; q=0.1'
'application/json'
etc.
Returns result + mimetype | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/sparql.py#L23-L45 | [
"def _mimeTypeToSparqlFormat(mimeType):\n if ';' in mimeType:\n mimeType = mimeType.split(';')[0].strip()\n return SUPPORTED_MIME_FORMATS[mimeType]\n"
] | import logging
from SPARQLWrapper import SPARQLWrapper, CSV, JSON
from flask import jsonify
from collections import defaultdict
import static as static
glogger = logging.getLogger(__name__)
# Default value is JSON
SUPPORTED_MIME_FORMATS = defaultdict(
lambda: JSON, {
'text/csv': CSV,
'application/json': JSON
}
)
MIME_FORMAT = {
format: mime for mime, format in SUPPORTED_MIME_FORMATS.iteritems()
}
def _mimeTypeToSparqlFormat(mimeType):
if ';' in mimeType:
mimeType = mimeType.split(';')[0].strip()
return SUPPORTED_MIME_FORMATS[mimeType]
|
CLARIAH/grlc | src/utils.py | getLoader | python | def getLoader(user, repo, sha=None, prov=None):
if user is None and repo is None:
loader = LocalLoader()
else:
loader = GithubLoader(user, repo, sha, prov)
return loader | Build a fileLoader (LocalLoader or GithubLoader) for the given repository. | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/utils.py#L30-L36 | null | import grlc.static as static
import grlc.gquery as gquery
import grlc.pagination as pageUtils
import grlc.swagger as swagger
from grlc.prov import grlcPROV
from grlc.fileLoaders import GithubLoader, LocalLoader
from grlc.queryTypes import qType
from grlc.projection import project
from grlc import __version__ as grlc_version
import re
import requests
import json
import logging
import SPARQLTransformer
from rdflib import Graph
glogger = logging.getLogger(__name__)
def turtleize(swag):
""" Transforms a JSON swag object into a text/turtle LDA equivalent representation """
swag_graph = Graph()
# TODO: load swag data onto graph
return swag_graph.serialize(format='turtle')
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]):
glogger.warning("grlc.utils.build_spec is deprecated and will " \
"be removed in the future. Use grlc.swagger.build_spec instead.")
return swagger.build_spec(user, repo, sha, prov, extraMetadata)
def build_swagger_spec(user, repo, sha, serverName):
"""Build grlc specification for the given github user / repo in swagger format """
if user and repo:
# Init provenance recording
prov_g = grlcPROV(user, repo)
else:
prov_g = None
swag = swagger.get_blank_spec()
swag['host'] = serverName
try:
loader = getLoader(user, repo, sha, prov_g)
except Exception as e:
# If repo does not exits
swag['info'] = {
'title': 'ERROR!',
'description': str(e)
}
swag['paths'] = {}
return swag
prev_commit, next_commit, info, basePath = \
swagger.get_repo_info(loader, sha, prov_g)
swag['prev_commit'] = prev_commit
swag['next_commit'] = next_commit
swag['info'] = info
swag['basePath'] = basePath
# TODO: can we pass loader to build_spec ?
spec = swagger.build_spec(user, repo, sha, prov_g)
for item in spec:
swag['paths'][item['call_name']] = swagger.get_path_for_item(item)
if prov_g:
prov_g.end_prov_graph()
swag['prov'] = prov_g.serialize(format='turtle')
return swag
def dispatch_query(user, repo, query_name, sha=None, content=None, requestArgs={}, acceptHeader='application/json',
requestUrl='http://', formData={}):
loader = getLoader(user, repo, sha=sha, prov=None)
query, q_type = loader.getTextForName(query_name)
# Call name implemented with SPARQL query
if q_type == qType['SPARQL'] or q_type == qType['JSON']:
resp, status, headers = dispatchSPARQLQuery(query, loader, requestArgs, acceptHeader, content, formData,
requestUrl)
if acceptHeader == 'application/json':
projection = loader.getProjectionForQueryName(query_name)
if projection:
dataIn = json.loads(resp)
dataOut = project(dataIn, projection)
resp = json.dumps(dataOut)
return resp, status, headers
# Call name implemented with TPF query
elif q_type == qType['TPF']:
resp, status, headers = dispatchTPFQuery(query, loader, acceptHeader, content)
return resp, status, headers
else:
return "Couldn't find a SPARQL, RDF dump, or TPF query with the requested name", 404, {}
def dispatchSPARQLQuery(raw_sparql_query, loader, requestArgs, acceptHeader, content, formData, requestUrl):
endpoint, auth = gquery.guess_endpoint_uri(raw_sparql_query, loader)
if endpoint == '':
return 'No SPARQL endpoint indicated', 407, {}
glogger.debug("=====================================================")
glogger.debug("Sending query to SPARQL endpoint: {}".format(endpoint))
glogger.debug("=====================================================")
query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
acceptHeader = 'application/json' if isinstance(raw_sparql_query, dict) else acceptHeader
pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
rewritten_query = query_metadata['query']
# Rewrite query using parameter values
if query_metadata['type'] == 'SelectQuery' or query_metadata['type'] == 'ConstructQuery':
rewritten_query = gquery.rewrite_query(query_metadata['original_query'], query_metadata['parameters'], requestArgs)
# Rewrite query using pagination
if query_metadata['type'] == 'SelectQuery' and 'pagination' in query_metadata:
rewritten_query = gquery.paginate_query(rewritten_query, query_metadata['pagination'], requestArgs)
resp = None
headers = {}
# If we have a mime field, we load the remote dump and query it locally
if 'mime' in query_metadata and query_metadata['mime']:
glogger.debug(
"Detected {} MIME type, proceeding with locally loading remote dump".format(query_metadata['mime']))
g = Graph()
try:
query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
g.parse(endpoint, format=query_metadata['mime'])
glogger.debug("Local RDF graph loaded successfully with {} triples".format(len(g)))
except Exception as e:
glogger.error(e)
results = g.query(rewritten_query, result='sparql')
# Prepare return format as requested
resp_string = ""
if 'application/json' in acceptHeader or (content and 'application/json' in static.mimetypes[content]):
resp_string = results.serialize(format='json')
glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
elif 'text/csv' in acceptHeader or (content and 'text/csv' in static.mimetypes[content]):
resp_string = results.serialize(format='csv')
glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
else:
return 'Unacceptable requested format', 415, {}
glogger.debug("Finished processing query against RDF dump, end of use case")
del g
# Check for INSERT/POST
elif query_metadata['type'] == 'InsertData':
glogger.debug("Processing INSERT query")
# Rewrite INSERT
rewritten_query = rewritten_query.replace("?_g_iri", "{}".format(formData.get('g')))
rewritten_query = rewritten_query.replace("<s> <p> <o>", formData.get('data'))
glogger.debug("INSERT query rewritten as {}".format(rewritten_query))
# Prepare HTTP POST request
reqHeaders = {'Accept': acceptHeader, 'Content-Type': 'application/sparql-update'}
response = requests.post(endpoint, data=rewritten_query, headers=reqHeaders, auth=auth)
glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
# Response headers
resp = response.text
headers['Content-Type'] = response.headers['Content-Type']
# If there's no mime type, the endpoint is an actual SPARQL endpoint
else:
# requestedMimeType = static.mimetypes[content] if content else acceptHeader
# result, contentType = sparql.getResponseText(endpoint, query, requestedMimeType)
reqHeaders = {'Accept': acceptHeader}
if content:
reqHeaders = {'Accept': static.mimetypes[content]}
data = {'query': rewritten_query}
glogger.debug('Sending HTTP request to SPARQL endpoint with params: {}'.format(data))
glogger.debug('Sending HTTP request to SPARQL endpoint with headers: {}'.format(reqHeaders))
glogger.debug('Sending HTTP request to SPARQL endpoint with auth: {}'.format(auth))
response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
# Response headers
resp = response.text
headers['Content-Type'] = response.headers['Content-Type']
# If the query is paginated, set link HTTP headers
if pagination:
# Get number of total results
count = gquery.count_query_results(rewritten_query, endpoint)
pageArg = requestArgs.get('page', None)
headerLink = pageUtils.buildPaginationHeader(count, pagination, pageArg, requestUrl)
headers['Link'] = headerLink
if 'proto' in query_metadata: # sparql transformer
resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['proto'], query_metadata['opt'])
headers['Server'] = 'grlc/' + grlc_version
return resp, 200, headers
def dispatchTPFQuery(raw_tpf_query, loader, acceptHeader, content):
endpoint, auth = gquery.guess_endpoint_uri(raw_tpf_query, loader)
glogger.debug("=====================================================")
glogger.debug("Sending query to TPF endpoint: {}".format(endpoint))
glogger.debug("=====================================================")
# TODO: pagination for TPF
# Preapre HTTP request
reqHeaders = {'Accept': acceptHeader, 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
if content:
reqHeaders = {'Accept': static.mimetypes[content], 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
tpf_list = re.split('\n|=', raw_tpf_query)
subject = tpf_list[tpf_list.index('subject') + 1]
predicate = tpf_list[tpf_list.index('predicate') + 1]
object = tpf_list[tpf_list.index('object') + 1]
data = {'subject': subject, 'predicate': predicate, 'object': object}
response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
# Response headers
resp = response.text
headers = {}
headers['Content-Type'] = response.headers['Content-Type']
headers['Server'] = 'grlc/' + grlc_version
return resp, 200, headers
|
CLARIAH/grlc | src/utils.py | build_swagger_spec | python | def build_swagger_spec(user, repo, sha, serverName):
if user and repo:
# Init provenance recording
prov_g = grlcPROV(user, repo)
else:
prov_g = None
swag = swagger.get_blank_spec()
swag['host'] = serverName
try:
loader = getLoader(user, repo, sha, prov_g)
except Exception as e:
# If repo does not exits
swag['info'] = {
'title': 'ERROR!',
'description': str(e)
}
swag['paths'] = {}
return swag
prev_commit, next_commit, info, basePath = \
swagger.get_repo_info(loader, sha, prov_g)
swag['prev_commit'] = prev_commit
swag['next_commit'] = next_commit
swag['info'] = info
swag['basePath'] = basePath
# TODO: can we pass loader to build_spec ?
spec = swagger.build_spec(user, repo, sha, prov_g)
for item in spec:
swag['paths'][item['call_name']] = swagger.get_path_for_item(item)
if prov_g:
prov_g.end_prov_graph()
swag['prov'] = prov_g.serialize(format='turtle')
return swag | Build grlc specification for the given github user / repo in swagger format | train | https://github.com/CLARIAH/grlc/blob/f5664e34f039010c00ef8ebb69917c05e8ce75d7/src/utils.py#L45-L82 | [
"def getLoader(user, repo, sha=None, prov=None):\n \"\"\"Build a fileLoader (LocalLoader or GithubLoader) for the given repository.\"\"\"\n if user is None and repo is None:\n loader = LocalLoader()\n else:\n loader = GithubLoader(user, repo, sha, prov)\n return loader\n"
] | import grlc.static as static
import grlc.gquery as gquery
import grlc.pagination as pageUtils
import grlc.swagger as swagger
from grlc.prov import grlcPROV
from grlc.fileLoaders import GithubLoader, LocalLoader
from grlc.queryTypes import qType
from grlc.projection import project
from grlc import __version__ as grlc_version
import re
import requests
import json
import logging
import SPARQLTransformer
from rdflib import Graph
glogger = logging.getLogger(__name__)
def turtleize(swag):
""" Transforms a JSON swag object into a text/turtle LDA equivalent representation """
swag_graph = Graph()
# TODO: load swag data onto graph
return swag_graph.serialize(format='turtle')
def getLoader(user, repo, sha=None, prov=None):
"""Build a fileLoader (LocalLoader or GithubLoader) for the given repository."""
if user is None and repo is None:
loader = LocalLoader()
else:
loader = GithubLoader(user, repo, sha, prov)
return loader
def build_spec(user, repo, sha=None, prov=None, extraMetadata=[]):
glogger.warning("grlc.utils.build_spec is deprecated and will " \
"be removed in the future. Use grlc.swagger.build_spec instead.")
return swagger.build_spec(user, repo, sha, prov, extraMetadata)
def dispatch_query(user, repo, query_name, sha=None, content=None, requestArgs={}, acceptHeader='application/json',
requestUrl='http://', formData={}):
loader = getLoader(user, repo, sha=sha, prov=None)
query, q_type = loader.getTextForName(query_name)
# Call name implemented with SPARQL query
if q_type == qType['SPARQL'] or q_type == qType['JSON']:
resp, status, headers = dispatchSPARQLQuery(query, loader, requestArgs, acceptHeader, content, formData,
requestUrl)
if acceptHeader == 'application/json':
projection = loader.getProjectionForQueryName(query_name)
if projection:
dataIn = json.loads(resp)
dataOut = project(dataIn, projection)
resp = json.dumps(dataOut)
return resp, status, headers
# Call name implemented with TPF query
elif q_type == qType['TPF']:
resp, status, headers = dispatchTPFQuery(query, loader, acceptHeader, content)
return resp, status, headers
else:
return "Couldn't find a SPARQL, RDF dump, or TPF query with the requested name", 404, {}
def dispatchSPARQLQuery(raw_sparql_query, loader, requestArgs, acceptHeader, content, formData, requestUrl):
    """Execute a SPARQL query against its endpoint; return (resp, status, headers).

    Covers four paths: querying a remote RDF dump locally (when the query
    metadata declares a 'mime' type), INSERT/POST updates, plain endpoint
    GETs, and pagination Link headers. When the metadata carries 'proto',
    the JSON result is post-processed with SPARQLTransformer.
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_sparql_query, loader)
    if endpoint == '':
        return 'No SPARQL endpoint indicated', 407, {}
    glogger.debug("=====================================================")
    glogger.debug("Sending query to SPARQL endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")
    query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
    # A dict query is a SPARQLTransformer (JSON) query; force JSON output for it
    acceptHeader = 'application/json' if isinstance(raw_sparql_query, dict) else acceptHeader
    pagination = query_metadata['pagination'] if 'pagination' in query_metadata else ""
    rewritten_query = query_metadata['query']
    # Rewrite query using parameter values
    if query_metadata['type'] == 'SelectQuery' or query_metadata['type'] == 'ConstructQuery':
        rewritten_query = gquery.rewrite_query(query_metadata['original_query'], query_metadata['parameters'], requestArgs)
    # Rewrite query using pagination
    if query_metadata['type'] == 'SelectQuery' and 'pagination' in query_metadata:
        rewritten_query = gquery.paginate_query(rewritten_query, query_metadata['pagination'], requestArgs)
    resp = None
    headers = {}
    # If we have a mime field, we load the remote dump and query it locally
    if 'mime' in query_metadata and query_metadata['mime']:
        glogger.debug(
            "Detected {} MIME type, proceeding with locally loading remote dump".format(query_metadata['mime']))
        g = Graph()
        try:
            query_metadata = gquery.get_metadata(raw_sparql_query, endpoint)
            g.parse(endpoint, format=query_metadata['mime'])
            glogger.debug("Local RDF graph loaded successfully with {} triples".format(len(g)))
        except Exception as e:
            # Best-effort: a failed parse leaves an empty graph and the query runs anyway
            glogger.error(e)
        results = g.query(rewritten_query, result='sparql')
        # Prepare return format as requested
        resp_string = ""
        if 'application/json' in acceptHeader or (content and 'application/json' in static.mimetypes[content]):
            resp_string = results.serialize(format='json')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        elif 'text/csv' in acceptHeader or (content and 'text/csv' in static.mimetypes[content]):
            resp_string = results.serialize(format='csv')
            glogger.debug("Results of SPARQL query against locally loaded dump: {}".format(resp_string))
        else:
            return 'Unacceptable requested format', 415, {}
        glogger.debug("Finished processing query against RDF dump, end of use case")
        # NOTE(review): resp_string is never assigned to resp, so this branch
        # falls through with resp=None -- verify the intended return value.
        del g
    # Check for INSERT/POST
    elif query_metadata['type'] == 'InsertData':
        glogger.debug("Processing INSERT query")
        # Rewrite INSERT
        rewritten_query = rewritten_query.replace("?_g_iri", "{}".format(formData.get('g')))
        rewritten_query = rewritten_query.replace("<s> <p> <o>", formData.get('data'))
        glogger.debug("INSERT query rewritten as {}".format(rewritten_query))
        # Prepare HTTP POST request
        reqHeaders = {'Accept': acceptHeader, 'Content-Type': 'application/sparql-update'}
        response = requests.post(endpoint, data=rewritten_query, headers=reqHeaders, auth=auth)
        glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
        # Response headers
        resp = response.text
        headers['Content-Type'] = response.headers['Content-Type']
    # If there's no mime type, the endpoint is an actual SPARQL endpoint
    else:
        # requestedMimeType = static.mimetypes[content] if content else acceptHeader
        # result, contentType = sparql.getResponseText(endpoint, query, requestedMimeType)
        reqHeaders = {'Accept': acceptHeader}
        if content:
            reqHeaders = {'Accept': static.mimetypes[content]}
        data = {'query': rewritten_query}
        glogger.debug('Sending HTTP request to SPARQL endpoint with params: {}'.format(data))
        glogger.debug('Sending HTTP request to SPARQL endpoint with headers: {}'.format(reqHeaders))
        glogger.debug('Sending HTTP request to SPARQL endpoint with auth: {}'.format(auth))
        response = requests.get(endpoint, params=data, headers=reqHeaders, auth=auth)
        glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
        # Response headers
        resp = response.text
        headers['Content-Type'] = response.headers['Content-Type']
    # If the query is paginated, set link HTTP headers
    if pagination:
        # Get number of total results
        count = gquery.count_query_results(rewritten_query, endpoint)
        pageArg = requestArgs.get('page', None)
        headerLink = pageUtils.buildPaginationHeader(count, pagination, pageArg, requestUrl)
        headers['Link'] = headerLink
    if 'proto' in query_metadata:  # sparql transformer
        resp = SPARQLTransformer.post_process(json.loads(resp), query_metadata['proto'], query_metadata['opt'])
    headers['Server'] = 'grlc/' + grlc_version
    return resp, 200, headers
def dispatchTPFQuery(raw_tpf_query, loader, acceptHeader, content):
    """Resolve a Triple Pattern Fragments query against its endpoint.

    Extracts subject/predicate/object from the raw query text and forwards
    them as HTTP GET parameters. Returns (body, 200, headers).
    """
    endpoint, auth = gquery.guess_endpoint_uri(raw_tpf_query, loader)
    glogger.debug("=====================================================")
    glogger.debug("Sending query to TPF endpoint: {}".format(endpoint))
    glogger.debug("=====================================================")
    # TODO: pagination for TPF
    # Preapre HTTP request (content-negotiated MIME type wins over Accept)
    accept = static.mimetypes[content] if content else acceptHeader
    reqHeaders = {'Accept': accept, 'Authorization': 'token {}'.format(static.ACCESS_TOKEN)}
    # Parse the 'key=value'-per-line triple pattern out of the raw query text
    tokens = re.split('\n|=', raw_tpf_query)
    pattern = {term: tokens[tokens.index(term) + 1] for term in ('subject', 'predicate', 'object')}
    response = requests.get(endpoint, params=pattern, headers=reqHeaders, auth=auth)
    glogger.debug('Response header from endpoint: ' + response.headers['Content-Type'])
    # Mirror the endpoint's content type and tag the response as served by grlc
    headers = {
        'Content-Type': response.headers['Content-Type'],
        'Server': 'grlc/' + grlc_version,
    }
    return response.text, 200, headers
|
def buildPaginationHeader(resultCount, resultsPerPage, pageArg, url):
    """Build an RFC-5988 ``Link`` header value for paginated query results.

    :param resultCount: total number of results of the query
    :param resultsPerPage: page size used for pagination
    :param pageArg: requested page number as a string, or None/empty for page 1
    :param url: the request URL, used as the base for the page links
    :return: ``Link`` header value with next/prev/first/last relations
    """
    # Ceiling integer division: a partially filled final page is still a page.
    # (Plain '/' is float division in Python 3 and would leak 'page=9.5'-style
    # URLs and break the 'page == lastPage' comparison below.)
    lastPage = -(-resultCount // resultsPerPage)
    if pageArg:
        page = int(pageArg)
        next_url = re.sub("page=[0-9]+", "page={}".format(page + 1), url)
        prev_url = re.sub("page=[0-9]+", "page={}".format(page - 1), url)
        first_url = re.sub("page=[0-9]+", "page=1", url)
        last_url = re.sub("page=[0-9]+", "page={}".format(lastPage), url)
    else:
        # No page argument: we are on page 1 and the URL has no page param yet
        page = 1
        next_url = url + "?page=2"
        prev_url = ""
        first_url = url + "?page=1"
        last_url = url + "?page={}".format(lastPage)
    if page == 1:
        headerLink = "<{}>; rel=next, <{}>; rel=last".format(next_url, last_url)
    elif page == lastPage:
        headerLink = "<{}>; rel=prev, <{}>; rel=first".format(prev_url, first_url)
    else:
        headerLink = "<{}>; rel=next, <{}>; rel=prev, <{}>; rel=first, <{}>; rel=last".format(next_url, prev_url, first_url, last_url)
    return headerLink
def getSwaggerPaginationDef(resultsPerPage):
    '''Build swagger spec section for pagination'''
    description = "The page number for this paginated query ({} results per page)".format(resultsPerPage)
    return {
        "name": "page",
        "type": "int",
        "in": "query",
        "description": description,
    }
|
def send(self, obj_id):
    """
    Send email to the assigned lists

    :param obj_id: int
    :return: dict|str
    """
    send_url = '{url}/{id}/send'.format(url=self.endpoint_url, id=obj_id)
    response = self._client.session.post(send_url)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Emails(API):
_endpoint = 'emails'
def send_to_contact(self, obj_id, contact_id):
"""
Send email to a specific contact
:param obj_id: int
:param contact_id: int
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/send/contact/{contact_id}'.format(
url=self.endpoint_url, id=obj_id, contact_id=contact_id
)
)
return self.process_response(response)
|
def send_to_contact(self, obj_id, contact_id):
    """
    Send email to a specific contact

    :param obj_id: int
    :param contact_id: int
    :return: dict|str
    """
    send_url = '{url}/{id}/send/contact/{contact_id}'.format(
        url=self.endpoint_url, id=obj_id, contact_id=contact_id
    )
    response = self._client.session.post(send_url)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Emails(API):
_endpoint = 'emails'
def send(self, obj_id):
"""
Send email to the assigned lists
:param obj_id: int
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/send'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
def get_owners(self):
    """
    Get a list of users available as contact owners

    :return: dict|str
    """
    owners_url = '{url}/list/owners'.format(url=self.endpoint_url)
    response = self._client.session.get(owners_url)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_events(
self,
obj_id,
search='',
include_events=None,
exclude_events=None,
order_by='',
order_by_dir='ASC',
page=1
):
"""
Get a list of a contact's engagement events
:param obj_id: int Contact ID
:param search: str
:param include_events: list|tuple
:param exclude_events: list|tuple
:param order_by: str
:param order_by_dir: str
:param page: int
:return: dict|str
"""
if include_events is None:
include_events = []
if exclude_events is None:
exclude_events = []
parameters = {
'search': search,
'includeEvents': include_events,
'excludeEvents': exclude_events,
'orderBy': order_by,
'orderByDir': order_by_dir,
'page': page
}
response = self._client.session.get(
'{url}/{id}/events'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def add_points(self, obj_id, points, **kwargs):
"""
Add the points to a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/plus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response)
def remove_dnc(self, obj_id, channel):
"""
Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response)
|
def get_events(
    self,
    obj_id,
    search='',
    include_events=None,
    exclude_events=None,
    order_by='',
    order_by_dir='ASC',
    page=1
):
    """
    Get a list of a contact's engagement events

    :param obj_id: int Contact ID
    :param search: str
    :param include_events: list|tuple
    :param exclude_events: list|tuple
    :param order_by: str
    :param order_by_dir: str
    :param page: int
    :return: dict|str
    """
    # None defaults stand in for mutable [] defaults shared across calls
    parameters = {
        'search': search,
        'includeEvents': include_events if include_events is not None else [],
        'excludeEvents': exclude_events if exclude_events is not None else [],
        'orderBy': order_by,
        'orderByDir': order_by_dir,
        'page': page
    }
    events_url = '{url}/{id}/events'.format(url=self.endpoint_url, id=obj_id)
    response = self._client.session.get(events_url, params=parameters)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def add_points(self, obj_id, points, **kwargs):
"""
Add the points to a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/plus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response)
def remove_dnc(self, obj_id, channel):
"""
Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response)
|
def get_contact_notes(
    self,
    obj_id,
    search='',
    start=0,
    limit=0,
    order_by='',
    order_by_dir='ASC'
):
    """
    Get a list of a contact's notes

    :param obj_id: int Contact ID
    :param search: str
    :param start: int
    :param limit: int
    :param order_by: str
    :param order_by_dir: str
    :return: dict|str
    """
    notes_url = '{url}/{id}/notes'.format(url=self.endpoint_url, id=obj_id)
    query_params = {
        'search': search,
        'start': start,
        'limit': limit,
        'orderBy': order_by,
        'orderByDir': order_by_dir,
    }
    response = self._client.session.get(notes_url, params=query_params)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_events(
self,
obj_id,
search='',
include_events=None,
exclude_events=None,
order_by='',
order_by_dir='ASC',
page=1
):
"""
Get a list of a contact's engagement events
:param obj_id: int Contact ID
:param search: str
:param include_events: list|tuple
:param exclude_events: list|tuple
:param order_by: str
:param order_by_dir: str
:param page: int
:return: dict|str
"""
if include_events is None:
include_events = []
if exclude_events is None:
exclude_events = []
parameters = {
'search': search,
'includeEvents': include_events,
'excludeEvents': exclude_events,
'orderBy': order_by,
'orderByDir': order_by_dir,
'page': page
}
response = self._client.session.get(
'{url}/{id}/events'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def add_points(self, obj_id, points, **kwargs):
"""
Add the points to a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/plus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response)
def remove_dnc(self, obj_id, channel):
"""
Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response)
|
def add_points(self, obj_id, points, **kwargs):
    """
    Add the points to a contact

    :param obj_id: int
    :param points: int
    :param kwargs: dict 'eventname' and 'actionname'
    :return: dict|str
    """
    plus_url = '{url}/{id}/points/plus/{points}'.format(
        url=self.endpoint_url, id=obj_id, points=points
    )
    response = self._client.session.post(plus_url, data=kwargs)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_events(
self,
obj_id,
search='',
include_events=None,
exclude_events=None,
order_by='',
order_by_dir='ASC',
page=1
):
"""
Get a list of a contact's engagement events
:param obj_id: int Contact ID
:param search: str
:param include_events: list|tuple
:param exclude_events: list|tuple
:param order_by: str
:param order_by_dir: str
:param page: int
:return: dict|str
"""
if include_events is None:
include_events = []
if exclude_events is None:
exclude_events = []
parameters = {
'search': search,
'includeEvents': include_events,
'excludeEvents': exclude_events,
'orderBy': order_by,
'orderByDir': order_by_dir,
'page': page
}
response = self._client.session.get(
'{url}/{id}/events'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response)
def remove_dnc(self, obj_id, channel):
"""
Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response)
|
def add_dnc(
    self,
    obj_id,
    channel='email',
    reason=MANUAL,
    channel_id=None,
    comments='via API'
):
    """
    Adds Do Not Contact

    :param obj_id: int
    :param channel: str
    :param reason: str
    :param channel_id: int
    :param comments: str
    :return: dict|str
    """
    dnc_url = '{url}/{id}/dnc/add/{channel}'.format(
        url=self.endpoint_url, id=obj_id, channel=channel
    )
    payload = {
        'reason': reason,
        'channelId': channel_id,
        'comments': comments
    }
    response = self._client.session.post(dnc_url, data=payload)
    return self.process_response(response)
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_events(
self,
obj_id,
search='',
include_events=None,
exclude_events=None,
order_by='',
order_by_dir='ASC',
page=1
):
"""
Get a list of a contact's engagement events
:param obj_id: int Contact ID
:param search: str
:param include_events: list|tuple
:param exclude_events: list|tuple
:param order_by: str
:param order_by_dir: str
:param page: int
:return: dict|str
"""
if include_events is None:
include_events = []
if exclude_events is None:
exclude_events = []
parameters = {
'search': search,
'includeEvents': include_events,
'excludeEvents': exclude_events,
'orderBy': order_by,
'orderByDir': order_by_dir,
'page': page
}
response = self._client.session.get(
'{url}/{id}/events'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def add_points(self, obj_id, points, **kwargs):
"""
Add the points to a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/plus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def remove_dnc(self, obj_id, channel):
"""
Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/contacts.py | Contacts.remove_dnc | python | def remove_dnc(self, obj_id, channel):
response = self._client.session.post(
'{url}/{id}/dnc/remove/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
)
)
return self.process_response(response) | Removes Do Not Contact
:param obj_id: int
:param channel: str
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/contacts.py#L226-L239 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Contacts(API):
# Contact unsubscribed themselves.
UNSUBSCRIBED = 1
# Contact was unsubscribed due to an unsuccessful send.
BOUNCED = 2
# Contact was manually unsubscribed by user.
MANUAL = 3
_endpoint = 'contacts'
def get_owners(self):
"""
Get a list of users available as contact owners
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/owners'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_field_list(self):
"""
Get a list of custom fields
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/fields'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_segments(self):
"""
Get a list of contact segments
:return: dict|str
"""
response = self._client.session.get(
'{url}/list/segments'.format(url=self.endpoint_url)
)
return self.process_response(response)
def get_events(
self,
obj_id,
search='',
include_events=None,
exclude_events=None,
order_by='',
order_by_dir='ASC',
page=1
):
"""
Get a list of a contact's engagement events
:param obj_id: int Contact ID
:param search: str
:param include_events: list|tuple
:param exclude_events: list|tuple
:param order_by: str
:param order_by_dir: str
:param page: int
:return: dict|str
"""
if include_events is None:
include_events = []
if exclude_events is None:
exclude_events = []
parameters = {
'search': search,
'includeEvents': include_events,
'excludeEvents': exclude_events,
'orderBy': order_by,
'orderByDir': order_by_dir,
'page': page
}
response = self._client.session.get(
'{url}/{id}/events'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_notes(
self,
obj_id,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC'
):
"""
Get a list of a contact's notes
:param obj_id: int Contact ID
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
parameters = {
'search': search,
'start': start,
'limit': limit,
'orderBy': order_by,
'orderByDir': order_by_dir,
}
response = self._client.session.get(
'{url}/{id}/notes'.format(
url=self.endpoint_url, id=obj_id
),
params=parameters
)
return self.process_response(response)
def get_contact_segments(self, obj_id):
"""
Get a segment of smart segments the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/segments'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_contact_campaigns(self, obj_id):
"""
Get a segment of campaigns the contact is in
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}/campaigns'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def add_points(self, obj_id, points, **kwargs):
"""
Add the points to a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/plus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def subtract_points(self, obj_id, points, **kwargs):
"""
Subtract points from a contact
:param obj_id: int
:param points: int
:param kwargs: dict 'eventname' and 'actionname'
:return: dict|str
"""
response = self._client.session.post(
'{url}/{id}/points/minus/{points}'.format(
url=self.endpoint_url, id=obj_id, points=points
),
data=kwargs
)
return self.process_response(response)
def add_dnc(
self,
obj_id,
channel='email',
reason=MANUAL,
channel_id=None,
comments='via API'
):
"""
Adds Do Not Contact
:param obj_id: int
:param channel: str
:param reason: str
:param channel_id: int
:param comments: str
:return: dict|str
"""
data = {
'reason': reason,
'channelId': channel_id,
'comments': comments
}
response = self._client.session.post(
'{url}/{id}/dnc/add/{channel}'.format(
url=self.endpoint_url, id=obj_id, channel=channel
),
data=data
)
return self.process_response(response)
|
divio/python-mautic | mautic/api.py | API.get | python | def get(self, obj_id):
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response) | Get a single item
:param obj_id: int
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L104-L116 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response)
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
"""
Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
)
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response)
def edit(self, obj_id, parameters, create_if_not_exists=False):
"""
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
"""
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/api.py | API.get_list | python | def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response) | Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L118-L155 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
"""
Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
)
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response)
def edit(self, obj_id, parameters, create_if_not_exists=False):
"""
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
"""
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/api.py | API.get_published_list | python | def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
) | Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L157-L176 | [
"def get_list(\n self,\n search='',\n start=0,\n limit=0,\n order_by='',\n order_by_dir='ASC',\n published_only=False,\n minimal=False\n):\n \"\"\"\n Get a list of items\n\n :param search: str\n :param start: int\n :param limit: int\n :param order_by: str\n :param order_... | class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response)
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response)
def edit(self, obj_id, parameters, create_if_not_exists=False):
"""
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
"""
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/api.py | API.create | python | def create(self, parameters):
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response) | Create a new item (if supported)
:param parameters: dict
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L178-L188 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response)
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
"""
Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
)
def edit(self, obj_id, parameters, create_if_not_exists=False):
"""
Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str
"""
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/api.py | API.edit | python | def edit(self, obj_id, parameters, create_if_not_exists=False):
if create_if_not_exists:
response = self._client.session.put(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
else:
response = self._client.session.patch(
'{url}/{id}/edit'.format(
url=self.endpoint_url, id=obj_id
),
data=parameters
)
return self.process_response(response) | Edit an item with option to create if it doesn't exist
:param obj_id: int
:param create_if_not_exists: bool
:param parameters: dict
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/api.py#L190-L213 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class API(object):
_endpoint = ''
def __init__(self, client):
self._client = client
self.endpoint_url = '{base_url}/api/{endpoint}'.format(
base_url=self._client.base_url,
endpoint=self._endpoint.strip(' /')
)
@staticmethod
def process_response(response):
if response.ok:
return response.json()
try:
return response.json()
except ValueError:
# no json object could be decoded
return response.content
@staticmethod
def action_not_supported(action):
"""
Returns a not supported error
:param action: str
:return: dict
"""
return {
'error': {
'code': 500,
'message':
'{action} is not supported at this time'.format(action=action)
}
}
def get(self, obj_id):
"""
Get a single item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.get(
'{url}/{id}'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
"""
Get a list of items
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:param published_only: bool
:param minimal: bool
:return: dict|str
"""
parameters = {}
args = ['search', 'start', 'limit', 'minimal']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
if order_by:
parameters['orderBy'] = order_by
if order_by_dir:
parameters['orderByDir'] = order_by_dir
if published_only:
parameters['publishedOnly'] = 'true'
response = self._client.session.get(
self.endpoint_url, params=parameters
)
return self.process_response(response)
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
"""
Proxy function to get_list with published_only set to True
:param search: str
:param start: int
:param limit: int
:param order_by: str
:param order_by_dir: str
:return: dict|str
"""
return self.get_list(
search=search,
start=start,
limit=limit,
order_by=order_by,
order_by_dir=order_by_dir,
published_only=True
)
def create(self, parameters):
"""
Create a new item (if supported)
:param parameters: dict
:return: dict|str
"""
response = self._client.session.post(
'{url}/new'.format(url=self.endpoint_url), data=parameters
)
return self.process_response(response)
def delete(self, obj_id):
"""
Delete an item
:param obj_id: int
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{id}/delete'.format(
url=self.endpoint_url, id=obj_id
)
)
return self.process_response(response)
|
divio/python-mautic | mautic/data.py | Data.get | python | def get(self, data_type, options=None):
if options is None:
options = {}
response = self._client.session.get(
'{url}/{type}'.format(
url=self.endpoint_url, type=data_type
),
params=options
)
return self.process_response(response) | Get a single item
:param data_type: str
:param options: dict
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/data.py#L10-L25 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Data(API):
_endpoint = 'data'
|
divio/python-mautic | mautic/stats.py | Stats.get | python | def get(self, table='', start=0, limit=0, order=None, where=None):
parameters = {}
args = ['start', 'limit', 'order', 'where']
for arg in args:
if arg in locals() and locals()[arg]:
parameters[arg] = locals()[arg]
response = self._client.session.get(
'{url}/{table}'.format(
url=self.endpoint_url, table=table
),
params=parameters
)
return self.process_response(response) | Get a list of stat items
:param table: str database table name
:param start: int
:param limit: int
:param order: list|tuple
:param where: list|tuple
:return: | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/stats.py#L10-L34 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Stats(API):
_endpoint = 'stats'
def delete(self, obj_id):
return self.action_not_supported('delete')
def get_list(
self,
search='',
start=0,
limit=0,
order_by='',
order_by_dir='ASC',
published_only=False,
minimal=False
):
return self.action_not_supported('get_list')
def create(self, parameters):
return self.action_not_supported('create')
def get_published_list(
self, search='', start=0, limit=0, order_by='', order_by_dir='ASC'
):
return self.action_not_supported('get_published_list')
def edit(self, obj_id, parameters, create_if_not_exists=False):
return self.action_not_supported('edit')
|
divio/python-mautic | mautic/segments.py | Segments.add_contact | python | def add_contact(self, segment_id, contact_id):
response = self._client.session.post(
'{url}/{segment_id}/contact/add/{contact_id}'.format(
url=self.endpoint_url,
segment_id=segment_id,
contact_id=contact_id
)
)
return self.process_response(response) | Add a contact to the segment
:param segment_id: int Segment ID
:param contact_id: int Contact ID
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/segments.py#L10-L26 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Segments(API):
_endpoint = 'segments'
|
divio/python-mautic | mautic/users.py | Users.check_permission | python | def check_permission(self, obj_id, permissions):
response = self._client.session.post(
'{url}/{id}/permissioncheck'.format(
url=self.endpoint_url, id=obj_id
),
data={'permissions': permissions}
)
return self.process_response(response) | Get list of permissions for a user
:param obj_id: int
:param permissions: str|list|tuple
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/users.py#L21-L35 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Users(API):
_endpoint = 'users'
def get_self(self):
"""
Get your (API) user
:return: dict|str
"""
response = self._client.session.get(
'{url}/self'.format(url=self.endpoint_url)
)
return self.process_response(response)
|
divio/python-mautic | mautic/point_triggers.py | PointTriggers.delete_trigger_events | python | def delete_trigger_events(self, trigger_id, event_ids):
response = self._client.session.delete(
'{url}/{trigger_id}/events/delete'.format(
url=self.endpoint_url, trigger_id=trigger_id
),
params={'events': event_ids}
)
return self.process_response(response) | Remove events from a point trigger
:param trigger_id: int
:param event_ids: list|tuple
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/point_triggers.py#L10-L25 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class PointTriggers(API):
_endpoint = 'points/triggers'
|
divio/python-mautic | mautic/forms.py | Forms.delete_fields | python | def delete_fields(self, form_id, field_ids):
response = self._client.session.delete(
'{url}/{form_id}/fields/delete'.format(
url=self.endpoint_url, form_id=form_id
),
params={'fields': field_ids}
)
return self.process_response(response) | Remove fields from a form
:param form_id: int
:param field_ids: list|tuple
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/forms.py#L10-L25 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Forms(API):
_endpoint = 'forms'
def delete_actions(self, form_id, action_ids):
"""
Remove actions from a form
:param form_id: int
:param action_ids: list|tuple
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{form_id}/actions/delete'.format(
url=self.endpoint_url, form_id=form_id
),
params={'actions': action_ids}
)
return self.process_response(response)
|
divio/python-mautic | mautic/forms.py | Forms.delete_actions | python | def delete_actions(self, form_id, action_ids):
response = self._client.session.delete(
'{url}/{form_id}/actions/delete'.format(
url=self.endpoint_url, form_id=form_id
),
params={'actions': action_ids}
)
return self.process_response(response) | Remove actions from a form
:param form_id: int
:param action_ids: list|tuple
:return: dict|str | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/forms.py#L27-L42 | [
"def process_response(response):\n if response.ok:\n return response.json()\n try:\n return response.json()\n except ValueError:\n # no json object could be decoded\n return response.content\n"
] | class Forms(API):
_endpoint = 'forms'
def delete_fields(self, form_id, field_ids):
"""
Remove fields from a form
:param form_id: int
:param field_ids: list|tuple
:return: dict|str
"""
response = self._client.session.delete(
'{url}/{form_id}/fields/delete'.format(
url=self.endpoint_url, form_id=form_id
),
params={'fields': field_ids}
)
return self.process_response(response)
|
divio/python-mautic | mautic/utils.py | update_token_tempfile | python | def update_token_tempfile(token):
with open(tmp, 'w') as f:
f.write(json.dumps(token, indent=4)) | Example of function for token update | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/utils.py#L20-L25 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import json
import os
from tempfile import gettempdir
tmp = os.path.join(gettempdir(), 'mautic_creds.json')
def read_token_tempfile():
"""
Example of function for getting stored token
:return: token dict
"""
with open(tmp, 'r') as f:
return json.loads(f.read())
|
divio/python-mautic | mautic/files.py | Files.set_folder | python | def set_folder(self, folder='assets'):
folder = folder.replace('/', '.')
self._endpoint = 'files/{folder}'.format(folder=folder) | Changes the file folder to look at
:param folder: str [images, assets]
:return: None | train | https://github.com/divio/python-mautic/blob/1fbff629070200002373c5e94c75e01561df418a/mautic/files.py#L10-L18 | null | class Files(API):
_endpoint = 'files/images'
def edit(self, obj_id, parameters=None, create_if_not_exists=False):
return self.action_not_supported('edit')
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/request.py | Request.observe_id | python | def observe_id(self):
if self._observe_id is None:
hasher = hashlib.sha256()
hasher.update(self.viewset_class.__module__.encode('utf8'))
hasher.update(self.viewset_class.__name__.encode('utf8'))
hasher.update(self.viewset_method.encode('utf8'))
# Arguments do not need to be taken into account as they are
# derived from the request path, which is already accounted for.
for key in sorted(self.GET.keys()):
hasher.update(key.encode('utf8'))
hasher.update(self.GET[key].encode('utf8'))
hasher.update(self.path.encode('utf8'))
hasher.update(self.path_info.encode('utf8'))
if self._force_auth_user is not None:
hasher.update(
(str(self._force_auth_user.id) or 'anonymous').encode('utf8')
)
else:
hasher.update(b'anonymous')
self._observe_id = hasher.hexdigest()
return self._observe_id | Unique identifier that identifies the observer. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/request.py#L43-L65 | null | class Request(http_request.HttpRequest):
"""Request used by the query observer to interact with the viewsets.
This request class is picklable.
"""
def __init__(self, viewset_class, viewset_method, request, args=None, kwargs=None):
"""
:param request: The original API request
"""
super().__init__()
self.viewset_class = viewset_class
self.viewset_method = viewset_method
self.args = args or []
self.kwargs = kwargs or {}
# Copy relevant fields from the original request.
self.method = request.method
self.META = {}
for key, value in request._request.META.items():
if isinstance(value, str):
self.META[key] = value
self.GET = request._request.GET.copy()
if OBSERVABLE_QUERY_PARAMETER in self.GET:
# Remove the original observe query parameter.
del self.GET[OBSERVABLE_QUERY_PARAMETER]
self.path = request._request.path
self.path_info = request._request.path_info
self._force_auth_user = request.user
self._observe_id = None
@property
def __getstate__(self):
return {
'viewset_class': self.viewset_class,
'viewset_method': self.viewset_method,
'args': self.args,
'kwargs': self.kwargs,
'method': self.method,
'META': self.META,
'GET': self.GET,
'path': self.path,
'path_info': self.path_info,
'user': self._force_auth_user,
'observe_id': self._observe_id,
}
def __setstate__(self, state):
self.viewset_class = state['viewset_class']
self.viewset_method = state['viewset_method']
self.args = state['args']
self.kwargs = state['kwargs']
self.method = state['method']
self.META = state['META']
self.GET = state['GET']
self.path = state['path']
self.path_info = state['path_info']
self._force_auth_user = state['user']
self._observe_id = state['observe_id']
def __repr__(self):
return '<Request: viewset={viewset} method={method} path={path} query={get}>'.format(
viewset=repr(self.viewset_class),
method=self.viewset_method,
path=self.path,
get=repr(self.GET),
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/views.py | QueryObserverUnsubscribeView.post | python | def post(self, request):
try:
observer_id = request.query_params['observer']
session_id = request.query_params['subscriber']
except KeyError:
return response.Response(status=400)
observer.remove_subscriber(session_id, observer_id)
return response.Response() | Handle a query observer unsubscription request. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/views.py#L7-L16 | [
"def remove_subscriber(session_id, observer_id):\n \"\"\"Remove subscriber from the given observer.\n\n :param session_id: Subscriber's session identifier\n :param observer_id: Observer identifier\n \"\"\"\n models.Observer.subscribers.through.objects.filter(\n subscriber_id=session_id, observ... | class QueryObserverUnsubscribeView(views.APIView):
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/signals.py | notify_observers | python | def notify_observers(table, kind, primary_key=None):
if IN_MIGRATIONS:
return
# Don't propagate events when there are no observers to receive them.
if not Observer.objects.filter(dependencies__table=table).exists():
return
def handler():
"""Send a notification to the given channel."""
try:
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_ORM_NOTIFY,
'table': table,
'kind': kind,
'primary_key': str(primary_key),
},
)
except ChannelFull:
logger.exception("Unable to notify workers.")
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
# If a batch is open, queue the send via the batcher.
batcher.add(
'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
)
else:
# If no batch is open, invoke immediately.
handler() | Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/signals.py#L35-L73 | null | import logging
from asgiref.sync import async_to_sync
from channels.exceptions import ChannelFull
from channels.layers import get_channel_layer
from django import dispatch
from django.db import transaction
from django.db.models import signals as model_signals
from django_priority_batch import PrioritizedBatcher
from .models import Observer
from .protocol import *
# Logger.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Global 'in migrations' flag to skip certain operations during migrations.
IN_MIGRATIONS = False
@dispatch.receiver(model_signals.pre_migrate)
def model_pre_migrate(*args, **kwargs):
"""Set 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = True
@dispatch.receiver(model_signals.post_migrate)
def model_post_migrate(*args, **kwargs):
"""Clear 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = False
@dispatch.receiver(model_signals.post_save)
def model_post_save(sender, instance, created=False, **kwargs):
"""Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if created:
notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)
else:
notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.post_delete)
def model_post_delete(sender, instance, **kwargs):
"""Signal emitted after any model is deleted via Django ORM.
:param sender: Model class that was deleted
:param instance: The actual instance that was removed
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
notify_observers(table, ORM_NOTIFY_KIND_DELETE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.m2m_changed)
def model_m2m_changed(sender, instance, action, **kwargs):
"""
Signal emitted after any M2M relation changes via Django ORM.
:param sender: M2M intermediate model
:param instance: The actual instance that was saved
:param action: M2M action
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if action == 'post_add':
notify_observers(table, ORM_NOTIFY_KIND_CREATE)
elif action in ('post_remove', 'post_clear'):
notify_observers(table, ORM_NOTIFY_KIND_DELETE)
transaction.on_commit(notify)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/signals.py | model_post_save | python | def model_post_save(sender, instance, created=False, **kwargs):
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if created:
notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)
else:
notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)
transaction.on_commit(notify) | Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/signals.py#L77-L96 | null | import logging
from asgiref.sync import async_to_sync
from channels.exceptions import ChannelFull
from channels.layers import get_channel_layer
from django import dispatch
from django.db import transaction
from django.db.models import signals as model_signals
from django_priority_batch import PrioritizedBatcher
from .models import Observer
from .protocol import *
# Logger.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Global 'in migrations' flag to skip certain operations during migrations.
IN_MIGRATIONS = False
@dispatch.receiver(model_signals.pre_migrate)
def model_pre_migrate(*args, **kwargs):
"""Set 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = True
@dispatch.receiver(model_signals.post_migrate)
def model_post_migrate(*args, **kwargs):
"""Clear 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = False
def notify_observers(table, kind, primary_key=None):
"""Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance
"""
if IN_MIGRATIONS:
return
# Don't propagate events when there are no observers to receive them.
if not Observer.objects.filter(dependencies__table=table).exists():
return
def handler():
"""Send a notification to the given channel."""
try:
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_ORM_NOTIFY,
'table': table,
'kind': kind,
'primary_key': str(primary_key),
},
)
except ChannelFull:
logger.exception("Unable to notify workers.")
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
# If a batch is open, queue the send via the batcher.
batcher.add(
'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
)
else:
# If no batch is open, invoke immediately.
handler()
@dispatch.receiver(model_signals.post_save)
@dispatch.receiver(model_signals.post_delete)
def model_post_delete(sender, instance, **kwargs):
"""Signal emitted after any model is deleted via Django ORM.
:param sender: Model class that was deleted
:param instance: The actual instance that was removed
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
notify_observers(table, ORM_NOTIFY_KIND_DELETE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.m2m_changed)
def model_m2m_changed(sender, instance, action, **kwargs):
"""
Signal emitted after any M2M relation changes via Django ORM.
:param sender: M2M intermediate model
:param instance: The actual instance that was saved
:param action: M2M action
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if action == 'post_add':
notify_observers(table, ORM_NOTIFY_KIND_CREATE)
elif action in ('post_remove', 'post_clear'):
notify_observers(table, ORM_NOTIFY_KIND_DELETE)
transaction.on_commit(notify)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/signals.py | model_post_delete | python | def model_post_delete(sender, instance, **kwargs):
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
notify_observers(table, ORM_NOTIFY_KIND_DELETE, instance.pk)
transaction.on_commit(notify) | Signal emitted after any model is deleted via Django ORM.
:param sender: Model class that was deleted
:param instance: The actual instance that was removed | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/signals.py#L100-L115 | null | import logging
from asgiref.sync import async_to_sync
from channels.exceptions import ChannelFull
from channels.layers import get_channel_layer
from django import dispatch
from django.db import transaction
from django.db.models import signals as model_signals
from django_priority_batch import PrioritizedBatcher
from .models import Observer
from .protocol import *
# Logger.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Global 'in migrations' flag to skip certain operations during migrations.
IN_MIGRATIONS = False
@dispatch.receiver(model_signals.pre_migrate)
def model_pre_migrate(*args, **kwargs):
"""Set 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = True
@dispatch.receiver(model_signals.post_migrate)
def model_post_migrate(*args, **kwargs):
"""Clear 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = False
def notify_observers(table, kind, primary_key=None):
"""Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance
"""
if IN_MIGRATIONS:
return
# Don't propagate events when there are no observers to receive them.
if not Observer.objects.filter(dependencies__table=table).exists():
return
def handler():
"""Send a notification to the given channel."""
try:
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_ORM_NOTIFY,
'table': table,
'kind': kind,
'primary_key': str(primary_key),
},
)
except ChannelFull:
logger.exception("Unable to notify workers.")
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
# If a batch is open, queue the send via the batcher.
batcher.add(
'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
)
else:
# If no batch is open, invoke immediately.
handler()
@dispatch.receiver(model_signals.post_save)
def model_post_save(sender, instance, created=False, **kwargs):
"""Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if created:
notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)
else:
notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.post_delete)
@dispatch.receiver(model_signals.m2m_changed)
def model_m2m_changed(sender, instance, action, **kwargs):
"""
Signal emitted after any M2M relation changes via Django ORM.
:param sender: M2M intermediate model
:param instance: The actual instance that was saved
:param action: M2M action
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if action == 'post_add':
notify_observers(table, ORM_NOTIFY_KIND_CREATE)
elif action in ('post_remove', 'post_clear'):
notify_observers(table, ORM_NOTIFY_KIND_DELETE)
transaction.on_commit(notify)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/signals.py | model_m2m_changed | python | def model_m2m_changed(sender, instance, action, **kwargs):
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if action == 'post_add':
notify_observers(table, ORM_NOTIFY_KIND_CREATE)
elif action in ('post_remove', 'post_clear'):
notify_observers(table, ORM_NOTIFY_KIND_DELETE)
transaction.on_commit(notify) | Signal emitted after any M2M relation changes via Django ORM.
:param sender: M2M intermediate model
:param instance: The actual instance that was saved
:param action: M2M action | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/signals.py#L119-L139 | null | import logging
from asgiref.sync import async_to_sync
from channels.exceptions import ChannelFull
from channels.layers import get_channel_layer
from django import dispatch
from django.db import transaction
from django.db.models import signals as model_signals
from django_priority_batch import PrioritizedBatcher
from .models import Observer
from .protocol import *
# Logger.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Global 'in migrations' flag to skip certain operations during migrations.
IN_MIGRATIONS = False
@dispatch.receiver(model_signals.pre_migrate)
def model_pre_migrate(*args, **kwargs):
"""Set 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = True
@dispatch.receiver(model_signals.post_migrate)
def model_post_migrate(*args, **kwargs):
"""Clear 'in migrations' flag."""
global IN_MIGRATIONS
IN_MIGRATIONS = False
def notify_observers(table, kind, primary_key=None):
"""Transmit ORM table change notification.
:param table: Name of the table that has changed
:param kind: Change type
:param primary_key: Primary key of the affected instance
"""
if IN_MIGRATIONS:
return
# Don't propagate events when there are no observers to receive them.
if not Observer.objects.filter(dependencies__table=table).exists():
return
def handler():
"""Send a notification to the given channel."""
try:
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_ORM_NOTIFY,
'table': table,
'kind': kind,
'primary_key': str(primary_key),
},
)
except ChannelFull:
logger.exception("Unable to notify workers.")
batcher = PrioritizedBatcher.global_instance()
if batcher.is_started:
# If a batch is open, queue the send via the batcher.
batcher.add(
'rest_framework_reactive', handler, group_by=(table, kind, primary_key)
)
else:
# If no batch is open, invoke immediately.
handler()
@dispatch.receiver(model_signals.post_save)
def model_post_save(sender, instance, created=False, **kwargs):
"""Signal emitted after any model is saved via Django ORM.
:param sender: Model class that was saved
:param instance: The actual instance that was saved
:param created: True if a new row was created
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
if created:
notify_observers(table, ORM_NOTIFY_KIND_CREATE, instance.pk)
else:
notify_observers(table, ORM_NOTIFY_KIND_UPDATE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.post_delete)
def model_post_delete(sender, instance, **kwargs):
"""Signal emitted after any model is deleted via Django ORM.
:param sender: Model class that was deleted
:param instance: The actual instance that was removed
"""
if sender._meta.app_label == 'rest_framework_reactive':
# Ignore own events.
return
def notify():
table = sender._meta.db_table
notify_observers(table, ORM_NOTIFY_KIND_DELETE, instance.pk)
transaction.on_commit(notify)
@dispatch.receiver(model_signals.m2m_changed)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | MainConsumer.observer_orm_notify | python | async def observer_orm_notify(self, message):
@database_sync_to_async
def get_observers(table):
# Find all observers with dependencies on the given table.
return list(
Observer.objects.filter(
dependencies__table=table, subscribers__isnull=False
)
.distinct('pk')
.values_list('pk', flat=True)
)
observers_ids = await get_observers(message['table'])
for observer_id in observers_ids:
await self.channel_layer.send(
CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': observer_id}
) | Process notification from ORM. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L28-L47 | null | class MainConsumer(AsyncConsumer):
"""Consumer for polling observers."""
async def observer_poll(self, message):
"""Poll observer after a delay."""
# Sleep until we need to notify the observer.
await asyncio.sleep(message['interval'])
# Dispatch task to evaluate the observable.
await self.channel_layer.send(
CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': message['observer']}
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | MainConsumer.observer_poll | python | async def observer_poll(self, message):
# Sleep until we need to notify the observer.
await asyncio.sleep(message['interval'])
# Dispatch task to evaluate the observable.
await self.channel_layer.send(
CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': message['observer']}
) | Poll observer after a delay. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L49-L57 | null | class MainConsumer(AsyncConsumer):
"""Consumer for polling observers."""
async def observer_orm_notify(self, message):
"""Process notification from ORM."""
@database_sync_to_async
def get_observers(table):
# Find all observers with dependencies on the given table.
return list(
Observer.objects.filter(
dependencies__table=table, subscribers__isnull=False
)
.distinct('pk')
.values_list('pk', flat=True)
)
observers_ids = await get_observers(message['table'])
for observer_id in observers_ids:
await self.channel_layer.send(
CHANNEL_WORKER, {'type': TYPE_EVALUATE, 'observer': observer_id}
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | WorkerConsumer.observer_evaluate | python | async def observer_evaluate(self, message):
observer_id = message['observer']
throttle_rate = get_queryobserver_settings()['throttle_rate']
if throttle_rate <= 0:
await self._evaluate(observer_id)
return
cache_key = throttle_cache_key(observer_id)
try:
count = cache.incr(cache_key)
# Ignore if delayed observer already scheduled.
if count == 2:
await self.channel_layer.send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': observer_id,
'interval': throttle_rate,
},
)
except ValueError:
count = cache.get_or_set(cache_key, default=1, timeout=throttle_rate)
# Ignore if cache was set and increased in another thread.
if count == 1:
await self._evaluate(observer_id) | Execute observer evaluation on the worker or throttle. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L93-L118 | [
"def get_queryobserver_settings():\n \"\"\"Query observer connection configuration.\"\"\"\n defaults = {\n # Observers going over these limits will emit warnings.\n 'warnings': {'max_result_length': 1000, 'max_processing_time': 1.0},\n # Observers going over these limits will be stopped.\... | class WorkerConsumer(AsyncConsumer):
"""Worker consumer."""
def __init__(self, *args, **kwargs):
"""Construct observer worker consumer."""
self._executor_cache = collections.OrderedDict()
super().__init__(*args, **kwargs)
async def _evaluate(self, observer_id):
# Get Observer from database.
@database_sync_to_async
def get_observer(observer_id):
try:
return Observer.objects.only('pk', 'request').get(pk=observer_id)
except Observer.DoesNotExist:
return None
observer = await get_observer(observer_id)
if not observer:
return
# Get QueryObserver executor from cache and evaluate.
try:
executor = self._executor_cache[observer.pk]
self._executor_cache.move_to_end(observer.pk)
except KeyError:
executor = QueryObserver(pickle.loads(observer.request))
self._executor_cache[observer.pk] = executor
if len(self._executor_cache) > MAX_CACHED_EXECUTORS:
self._executor_cache.popitem(last=False)
await executor.evaluate()
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | ClientConsumer.websocket_connect | python | def websocket_connect(self, message):
self.session_id = self.scope['url_route']['kwargs']['subscriber_id']
super().websocket_connect(message)
# Create new subscriber object.
Subscriber.objects.get_or_create(session_id=self.session_id) | Called when WebSocket connection is established. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L124-L130 | null | class ClientConsumer(JsonWebsocketConsumer):
"""Client consumer."""
@property
def groups(self):
"""Groups this channel should add itself to."""
if not hasattr(self, 'session_id'):
return []
return [GROUP_SESSIONS.format(session_id=self.session_id)]
def disconnect(self, code):
"""Called when WebSocket connection is closed."""
Subscriber.objects.filter(session_id=self.session_id).delete()
def observer_update(self, message):
"""Called when update from observer is received."""
# Demultiplex observer update into multiple messages.
for action in ('added', 'changed', 'removed'):
for item in message[action]:
self.send_json(
{
'msg': action,
'observer': message['observer'],
'primary_key': message['primary_key'],
'order': item['order'],
'item': item['data'],
}
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | ClientConsumer.disconnect | python | def disconnect(self, code):
Subscriber.objects.filter(session_id=self.session_id).delete() | Called when WebSocket connection is closed. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L140-L142 | null | class ClientConsumer(JsonWebsocketConsumer):
"""Client consumer."""
def websocket_connect(self, message):
"""Called when WebSocket connection is established."""
self.session_id = self.scope['url_route']['kwargs']['subscriber_id']
super().websocket_connect(message)
# Create new subscriber object.
Subscriber.objects.get_or_create(session_id=self.session_id)
@property
def groups(self):
"""Groups this channel should add itself to."""
if not hasattr(self, 'session_id'):
return []
return [GROUP_SESSIONS.format(session_id=self.session_id)]
def observer_update(self, message):
"""Called when update from observer is received."""
# Demultiplex observer update into multiple messages.
for action in ('added', 'changed', 'removed'):
for item in message[action]:
self.send_json(
{
'msg': action,
'observer': message['observer'],
'primary_key': message['primary_key'],
'order': item['order'],
'item': item['data'],
}
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/consumers.py | ClientConsumer.observer_update | python | def observer_update(self, message):
# Demultiplex observer update into multiple messages.
for action in ('added', 'changed', 'removed'):
for item in message[action]:
self.send_json(
{
'msg': action,
'observer': message['observer'],
'primary_key': message['primary_key'],
'order': item['order'],
'item': item['data'],
}
) | Called when update from observer is received. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/consumers.py#L144-L157 | null | class ClientConsumer(JsonWebsocketConsumer):
"""Client consumer."""
def websocket_connect(self, message):
"""Called when WebSocket connection is established."""
self.session_id = self.scope['url_route']['kwargs']['subscriber_id']
super().websocket_connect(message)
# Create new subscriber object.
Subscriber.objects.get_or_create(session_id=self.session_id)
@property
def groups(self):
"""Groups this channel should add itself to."""
if not hasattr(self, 'session_id'):
return []
return [GROUP_SESSIONS.format(session_id=self.session_id)]
def disconnect(self, code):
"""Called when WebSocket connection is closed."""
Subscriber.objects.filter(session_id=self.session_id).delete()
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | remove_subscriber | python | def remove_subscriber(session_id, observer_id):
models.Observer.subscribers.through.objects.filter(
subscriber_id=session_id, observer_id=observer_id
).delete() | Remove subscriber from the given observer.
:param session_id: Subscriber's session identifier
:param observer_id: Observer identifier | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L473-L481 | null | import collections
import logging
import pickle
import time
from asgiref.sync import async_to_sync
from channels.db import database_sync_to_async
from channels.layers import get_channel_layer
from django.core import exceptions as django_exceptions
from django.db import IntegrityError, connection, transaction
from django.http import Http404
from django.utils import timezone
from rest_framework import request as api_request
from . import exceptions, models
from .connection import get_queryobserver_settings
from .protocol import *
# Logger.
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Observable method options attribute name prefix.
OBSERVABLE_OPTIONS_PREFIX = 'observable_'
# Maximum number of retries in case of concurrent observer creates.
MAX_INTEGRITY_ERROR_RETRIES = 3
class Options:
"""Query observer options."""
# Valid change detection types.
CHANGE_DETECTION_PUSH = 'push'
CHANGE_DETECTION_POLL = 'poll'
def __init__(self, viewset, viewset_method):
self._viewset = viewset
self._viewset_method = viewset_method
# Determine the primary key.
self.primary_key = self.get_option('primary_key')
if self.primary_key is None:
# Primary key attribute is not defined, attempt to autodiscover it from the queryset.
try:
self.primary_key = viewset.get_queryset().model._meta.pk.name
except AssertionError:
# No queryset is defined.
raise exceptions.MissingPrimaryKey(
"Observable method does not define a primary key and the viewset "
"does not provide a queryset. Define a queryset or use the primary_key "
"decorator."
)
# Determine change detection type.
self.change_detection = self.get_option(
'change_detection', Options.CHANGE_DETECTION_PUSH
)
self.poll_interval = self.get_option('poll_interval')
def get_option(self, name, default=None):
return getattr(
self._viewset_method,
'{}{}'.format(OBSERVABLE_OPTIONS_PREFIX, name),
default,
)
class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver._get_logging_extra | python | def _get_logging_extra(self, duration=None, results=None):
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
} | Extra information for logger. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L98-L111 | null | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver._get_logging_id | python | def _get_logging_id(self):
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
) | Get logging identifier. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L113-L119 | null | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver._warning | python | def _warning(self, msg, duration=None, results=None):
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
) | Log warnings. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L121-L126 | [
"def _get_logging_extra(self, duration=None, results=None):\n \"\"\"Extra information for logger.\"\"\"\n return {\n 'duration': duration,\n 'results': results,\n 'observer_id': self.id,\n 'viewset': '{}.{}'.format(\n self._request.viewset_class.__module__,\n ... | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver.subscribe | python | def subscribe(self, session_id, dependencies=None):
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results | Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L128-L250 | [
"def _get_logging_extra(self, duration=None, results=None):\n \"\"\"Extra information for logger.\"\"\"\n return {\n 'duration': duration,\n 'results': results,\n 'observer_id': self.id,\n 'viewset': '{}.{}'.format(\n self._request.viewset_class.__module__,\n ... | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver.evaluate | python | async def evaluate(self):
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
) | Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only) | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L252-L325 | [
"def get_queryobserver_settings():\n \"\"\"Query observer connection configuration.\"\"\"\n defaults = {\n # Observers going over these limits will emit warnings.\n 'warnings': {'max_result_length': 1000, 'max_processing_time': 1.0},\n # Observers going over these limits will be stopped.\... | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver._viewset_results | python | def _viewset_results(self):
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results | Parse results from the viewset response. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L327-L361 | null | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _evaluate(self, viewset_results=None):
"""Evaluate query observer.
:param viewset_results: Objects returned by the viewset query
"""
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None)
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/observer.py | QueryObserver._evaluate | python | def _evaluate(self, viewset_results=None):
if viewset_results is None:
viewset_results = self._viewset_results()
try:
observer = models.Observer.objects.get(id=self.id)
# Do not evaluate the observer if there are no subscribers
if observer.subscribers.count() == 0:
return (None, None, None)
# Update last evaluation time.
models.Observer.objects.filter(id=self.id).update(
last_evaluation=timezone.now()
)
# Log viewsets with too much output.
max_result = get_queryobserver_settings()['warnings']['max_result_length']
if len(viewset_results) > max_result:
self._warning(
"Observed viewset returns too many results",
results=len(viewset_results),
)
new_results = collections.OrderedDict()
for order, item in enumerate(viewset_results):
if not isinstance(item, dict):
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
item = {'order': order, 'data': item}
try:
new_results[str(item['data'][self._meta.primary_key])] = item
except KeyError:
raise KeyError(
"Observable view did not return primary key field '{}'!".format(
self._meta.primary_key
)
)
# Process difference between old results and new results.
added, changed = [], []
new_ids = list(new_results.keys())
removed_qs = observer.items.exclude(primary_key__in=new_results.keys())
removed = list(removed_qs.values('order', 'data'))
maybe_changed_qs = observer.items.filter(primary_key__in=new_results.keys())
with transaction.atomic():
# Removed items.
removed_qs.delete()
# Defer unique ordering constraint before processing order updates.
# NOTE: The name of the constrait is generated by Django ORM.
with connection.cursor() as cursor:
cursor.execute(
"SET CONSTRAINTS rest_framework_reactive_item_observer_id_order_9b8adde6_uniq DEFERRED"
)
# Changed items.
for item_id, old_order, old_data in maybe_changed_qs.values_list(
'primary_key', 'order', 'data'
):
new_item = new_results[item_id]
new_ids.remove(item_id)
if new_item['data'] != old_data:
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
data=new_item['data'], order=new_item['order']
)
elif new_item['order'] != old_order:
# TODO: If only order has changed, don't transmit
# full data (needs frontend support).
changed.append(new_item)
observer.items.filter(primary_key=item_id).update(
order=new_item['order']
)
# Added items.
for item_id in new_ids:
item = new_results[item_id]
added.append(item)
observer.items.create(
primary_key=item_id, order=item['order'], data=item['data']
)
return (added, changed, removed)
except models.Observer.DoesNotExist:
# Observer removed, ignore evaluation
return (None, None, None) | Evaluate query observer.
:param viewset_results: Objects returned by the viewset query | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/observer.py#L363-L459 | [
"def _viewset_results(self):\n \"\"\"Parse results from the viewset response.\"\"\"\n results = []\n try:\n response = self._viewset_method(\n self._viewset.request, *self._request.args, **self._request.kwargs\n )\n\n if response.status_code == 200:\n results = re... | class QueryObserver:
"""Query observer.
A query observer observes a specific viewset for changes and propagates these
changes to all interested subscribers.
"""
def __init__(self, request):
"""Create new query observer.
:param request: A `queryobserver.request.Request` instance
"""
# Obtain a serializer by asking the viewset to provide one. We instantiate the
# viewset with a fake request, so that the viewset methods work as expected.
viewset = request.viewset_class()
viewset.request = api_request.Request(request)
viewset.request.method = request.method
viewset.format_kwarg = None
viewset.args = request.args
viewset.kwargs = request.kwargs
self._request = request
self._viewset = viewset
self._viewset_method = getattr(viewset, request.viewset_method)
self._meta = Options(viewset, self._viewset_method)
@property
def id(self):
"""Unique observer identifier."""
return self._request.observe_id
def _get_logging_extra(self, duration=None, results=None):
"""Extra information for logger."""
return {
'duration': duration,
'results': results,
'observer_id': self.id,
'viewset': '{}.{}'.format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
),
'method': self._request.viewset_method,
'path': self._request.path,
'get': self._request.GET,
}
def _get_logging_id(self):
"""Get logging identifier."""
return "{}.{}/{}".format(
self._request.viewset_class.__module__,
self._request.viewset_class.__name__,
self._request.viewset_method,
)
def _warning(self, msg, duration=None, results=None):
"""Log warnings."""
logger.warning(
"{} ({})".format(msg, self._get_logging_id()),
extra=self._get_logging_extra(duration=duration, results=results),
)
def subscribe(self, session_id, dependencies=None):
"""Initialize observer and register subscriber.
:param session_id: Subscriber's session identifier
:param dependencies: List of ORM to register as dependencies for orm_notify
"""
try:
change_detection = self._meta.change_detection
if change_detection not in [
Options.CHANGE_DETECTION_PUSH,
Options.CHANGE_DETECTION_POLL,
]:
raise NotImplementedError(
"Change detection mechanism '{}' not implemented.".format(
change_detection
)
)
viewset_results = self._viewset_results()
poll_interval = (
self._meta.poll_interval
if change_detection == Options.CHANGE_DETECTION_POLL
else None
)
# Subscribe to observer in a single query. First, create an
# observer, then create a subscriber, and finally subscribe to
# the observer. If already subscribed, ignore the conflict.
for retry in range(MAX_INTEGRITY_ERROR_RETRIES):
is_subscribed = False
cursor = connection.cursor()
try:
cursor.execute(
"""
WITH inserted_observer AS (
INSERT into {observer_table} ("id", "request", "poll_interval")
VALUES (%(observer_id)s, %(request)s, %(poll_interval)s)
ON CONFLICT DO NOTHING
), inserted_subscriber AS (
INSERT into {subscriber_table} ("session_id", "created")
VALUES (%(subscriber_id)s, NOW())
ON CONFLICT DO NOTHING
)
INSERT INTO {observer_subscribers_table} ("observer_id", "subscriber_id")
VALUES (%(observer_id)s, %(subscriber_id)s)
""".format(
observer_table=models.Observer._meta.db_table,
subscriber_table=models.Subscriber._meta.db_table,
observer_subscribers_table=models.Observer.subscribers.through._meta.db_table,
),
params={
'observer_id': self.id,
'request': pickle.dumps(self._request),
'poll_interval': poll_interval,
'subscriber_id': session_id,
},
)
is_subscribed = True
except IntegrityError as err:
msg = str(err)
if (
'Key (observer_id, subscriber_id)' in msg
and 'already exists' in msg
):
# Subscriber already subscribed, we're good.
is_subscribed = True
elif (
'Key (observer_id)' in msg or 'Key (subscriber_id)' in msg
) and 'not present in table' in msg:
# Could not subscribe because observer, subscriber or
# both are missing, retry.
if retry == MAX_INTEGRITY_ERROR_RETRIES - 1:
raise
else:
raise
finally:
cursor.close()
if is_subscribed:
break
# Determine who should notify us based on the configured change
# detection mechanism.
if change_detection == Options.CHANGE_DETECTION_PUSH:
if dependencies:
tables = [model._meta.db_table for model in dependencies]
else:
tables = [self._viewset.get_queryset().model._meta.db_table]
# Register table dependencies for push observables.
for table in tables:
try:
models.Dependency.objects.get_or_create(
observer_id=self.id, table=table
)
except models.Observer.DoesNotExist:
# The observer was removed before dependency tables
# were created.
return viewset_results
elif self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
async_to_sync(get_channel_layer().send)(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
self._evaluate(viewset_results)
except Exception:
logger.exception(
"Error while subscribing to observer ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(),
)
return viewset_results
async def evaluate(self):
"""Evaluate the query observer.
:param return_emitted: True if the emitted diffs should be returned (testing only)
"""
@database_sync_to_async
def remove_subscribers():
models.Observer.subscribers.through.objects.filter(
observer_id=self.id
).delete()
@database_sync_to_async
def get_subscriber_sessions():
return list(
models.Observer.subscribers.through.objects.filter(observer_id=self.id)
.distinct('subscriber_id')
.values_list('subscriber_id', flat=True)
)
try:
settings = get_queryobserver_settings()
start = time.time()
# Evaluate the observer
added, changed, removed = await database_sync_to_async(self._evaluate)()
duration = time.time() - start
# Log slow observers.
if duration > settings['warnings']['max_processing_time']:
self._warning("Slow observed viewset", duration=duration)
# Remove subscribers of really slow observers.
if duration > settings['errors']['max_processing_time']:
logger.error(
"Removing subscribers to extremely slow observed viewset ({})".format(
self._get_logging_id()
),
extra=self._get_logging_extra(duration=duration),
)
await remove_subscribers()
if self._meta.change_detection == Options.CHANGE_DETECTION_POLL:
# Register poller.
await get_channel_layer().send(
CHANNEL_MAIN,
{
'type': TYPE_POLL,
'observer': self.id,
'interval': self._meta.poll_interval,
},
)
message = {
'type': TYPE_ITEM_UPDATE,
'observer': self.id,
'primary_key': self._meta.primary_key,
'added': added,
'changed': changed,
'removed': removed,
}
# Only generate notifications in case there were any changes.
if added or changed or removed:
for session_id in await get_subscriber_sessions():
await get_channel_layer().group_send(
GROUP_SESSIONS.format(session_id=session_id), message
)
except Exception:
logger.exception(
"Error while evaluating observer ({})".format(self._get_logging_id()),
extra=self._get_logging_extra(),
)
def _viewset_results(self):
"""Parse results from the viewset response."""
results = []
try:
response = self._viewset_method(
self._viewset.request, *self._request.args, **self._request.kwargs
)
if response.status_code == 200:
results = response.data
if not isinstance(results, list):
if isinstance(results, dict):
# XXX: This can incidently match if a single
# object has results key
if 'results' in results and isinstance(
results['results'], list
):
# Support paginated results.
results = results['results']
else:
results.setdefault(self._meta.primary_key, 1)
results = [collections.OrderedDict(results)]
else:
raise ValueError(
"Observable views must return a dictionary or a list of dictionaries!"
)
except Http404:
pass
except django_exceptions.ObjectDoesNotExist:
# The evaluation may fail when certain dependent objects (like users) are removed
# from the database. In this case, the observer is stopped.
pass
return results
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
def __repr__(self):
return '<QueryObserver: id={id} request={request}>'.format(
id=self.id, request=repr(self._request)
)
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/decorators.py | observable | python | def observable(
_method_or_viewset=None, poll_interval=None, primary_key=None, dependencies=None
):
if poll_interval and dependencies:
raise ValueError('Only one of poll_interval and dependencies arguments allowed')
def decorator_observable(method_or_viewset):
if inspect.isclass(method_or_viewset):
list_method = getattr(method_or_viewset, 'list', None)
if list_method is not None:
method_or_viewset.list = observable(list_method)
return method_or_viewset
# Do not decorate an already observable method twice.
if getattr(method_or_viewset, 'is_observable', False):
return method_or_viewset
@functools.wraps(method_or_viewset)
def wrapper(self, request, *args, **kwargs):
if observer_request.OBSERVABLE_QUERY_PARAMETER in request.query_params:
# TODO: Validate the session identifier.
session_id = request.query_params[
observer_request.OBSERVABLE_QUERY_PARAMETER
]
# Create request and subscribe the session to given observer.
request = observer_request.Request(
self.__class__, method_or_viewset.__name__, request, args, kwargs
)
# Initialize observer and subscribe.
instance = observer.QueryObserver(request)
data = instance.subscribe(session_id, dependencies)
return response.Response({'observer': instance.id, 'items': data})
else:
# Non-reactive API.
return method_or_viewset(self, request, *args, **kwargs)
wrapper.is_observable = True
if poll_interval is not None:
wrapper.observable_change_detection = observer.Options.CHANGE_DETECTION_POLL
wrapper.observable_poll_interval = poll_interval
if primary_key is not None:
wrapper.observable_primary_key = primary_key
return wrapper
if _method_or_viewset is None:
return decorator_observable
else:
return decorator_observable(_method_or_viewset) | Make ViewSet or ViewSet method observable.
Decorating a ViewSet class is the same as decorating its `list` method.
If decorated method returns a response containing a list of items, it must
use the provided `LimitOffsetPagination` for any pagination. In case a
non-list response is returned, the resulting item will be wrapped into a
list.
When multiple decorators are used, `observable` must be the first one to be
applied as it needs access to the method name.
:param poll_interval: Configure given observable as a polling observable
:param primary_key: Primary key for tracking observable items
:param dependencies: List of ORM to register as dependencies for
orm_notify. If None the observer will subscribe to notifications from
the queryset model. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/decorators.py#L10-L84 | [
"def decorator_observable(method_or_viewset):\n\n if inspect.isclass(method_or_viewset):\n list_method = getattr(method_or_viewset, 'list', None)\n if list_method is not None:\n method_or_viewset.list = observable(list_method)\n\n return method_or_viewset\n\n # Do not decorate ... | import functools
import inspect
from rest_framework import response
from . import observer
from . import request as observer_request
|
genialis/django-rest-framework-reactive | src/rest_framework_reactive/management/commands/clearobservers.py | Command.handle | python | def handle(self, *args, **options):
models.Observer.objects.all().delete()
models.Subscriber.objects.all().delete()
for cache_key in cache.keys(search='{}*'.format(THROTTLE_CACHE_PREFIX)):
cache.delete(cache_key) | Command handle. | train | https://github.com/genialis/django-rest-framework-reactive/blob/ddf3d899685a54b6bd0ae4b3789649a89340c59f/src/rest_framework_reactive/management/commands/clearobservers.py#L13-L19 | null | class Command(BaseCommand):
"""Clear observer state."""
help = "Clear observer state: delete all observers and subscribers."
|
PyMySQL/Tornado-MySQL | tornado_mysql/cursors.py | Cursor.execute | python | def execute(self, query, args=None):
'''Execute a query'''
conn = self._get_db()
while (yield self.nextset()):
pass
if PY2: # Use bytes on Python 2 always
encoding = conn.encoding
def ensure_bytes(x):
if isinstance(x, unicode):
x = x.encode(encoding)
return x
query = ensure_bytes(query)
if args is not None:
if isinstance(args, (tuple, list)):
args = tuple(map(ensure_bytes, args))
elif isinstance(args, dict):
args = dict((ensure_bytes(key), ensure_bytes(val)) for (key, val) in args.items())
else:
args = ensure_bytes(args)
if args is not None:
query = query % self._escape_args(args, conn)
yield self._query(query)
self._executed = query
raise gen.Return(self.rowcount) | Execute a query | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L104-L134 | [
"def _get_db(self):\n if not self.connection:\n raise err.ProgrammingError(\"Cursor closed\")\n return self.connection\n"
] | class Cursor(object):
"""Cursor is used to interact with the database."""
#: Max stetement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
def __init__(self, connection):
'''
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
'''
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
@gen.coroutine
def close(self):
'''
Closing a cursor just exhausts all remaining data.
'''
conn = self.connection
if conn is None:
return
try:
while (yield self.nextset()):
pass
finally:
self.connection = None
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
@gen.coroutine
def _nextset(self, unbuffered=False):
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
raise gen.Return()
if not current_result.has_next:
raise gen.Return()
yield conn.next_result(unbuffered=unbuffered)
yield self._do_get_result()
raise gen.Return(True)
@gen.coroutine
def nextset(self):
"""Get the next query set"""
res = yield self._nextset(False)
raise gen.Return(res)
def _escape_args(self, args, conn):
if isinstance(args, (tuple, list)):
return tuple(conn.escape(arg) for arg in args)
elif isinstance(args, dict):
return dict((key, conn.escape(val)) for (key, val) in args.items())
else:
#If it's not a dictionary let's try escaping it anyways.
#Worst case it will throw a Value error
return conn.escape(args)
@gen.coroutine
@gen.coroutine
def executemany(self, query, args):
"""Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times.
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1)
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
yield self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
else:
rows = 0
for arg in args:
yield self.execute(query, arg)
rows += self.rowcount
self.rowcount = rows
raise gen.Return(self.rowcount)
@gen.coroutine
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
v = v.encode(encoding)
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
v = v.encode(encoding)
if len(sql) + len(v) + 1 > max_stmt_length:
yield self.execute(bytes(sql + postfix))
rows += self.rowcount
sql = bytearray(prefix)
else:
sql += b','
sql += v
yield self.execute(bytes(sql + postfix))
rows += self.rowcount
self.rowcount = rows
@gen.coroutine
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
yield self._query(q)
yield self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
yield self._query(q)
self._executed = q
raise gen.Return(args)
def fetchone(self):
''' Fetch the next row '''
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
''' Fetch several rows '''
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
@gen.coroutine
def _query(self, q):
conn = self._get_db()
self._last_executed = q
yield conn.query(q)
yield self._do_get_result()
@gen.coroutine
def _do_get_result(self):
conn = self._get_db()
self.rownumber = 0
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
if result.warning_count > 0:
yield self._show_warnings(conn)
@gen.coroutine
def _show_warnings(self, conn):
ws = yield conn.show_warnings()
for w in ws:
msg = w[-1]
if PY2 and isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(msg, err.Warning, 4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/cursors.py | Cursor.executemany | python | def executemany(self, query, args):
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1)
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
yield self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
else:
rows = 0
for arg in args:
yield self.execute(query, arg)
rows += self.rowcount
self.rowcount = rows
raise gen.Return(self.rowcount) | Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L137-L161 | [
"def _get_db(self):\n if not self.connection:\n raise err.ProgrammingError(\"Cursor closed\")\n return self.connection\n"
] | class Cursor(object):
"""Cursor is used to interact with the database."""
#: Max stetement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
def __init__(self, connection):
'''
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
'''
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
@gen.coroutine
def close(self):
'''
Closing a cursor just exhausts all remaining data.
'''
conn = self.connection
if conn is None:
return
try:
while (yield self.nextset()):
pass
finally:
self.connection = None
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
@gen.coroutine
def _nextset(self, unbuffered=False):
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
raise gen.Return()
if not current_result.has_next:
raise gen.Return()
yield conn.next_result(unbuffered=unbuffered)
yield self._do_get_result()
raise gen.Return(True)
@gen.coroutine
def nextset(self):
"""Get the next query set"""
res = yield self._nextset(False)
raise gen.Return(res)
def _escape_args(self, args, conn):
if isinstance(args, (tuple, list)):
return tuple(conn.escape(arg) for arg in args)
elif isinstance(args, dict):
return dict((key, conn.escape(val)) for (key, val) in args.items())
else:
#If it's not a dictionary let's try escaping it anyways.
#Worst case it will throw a Value error
return conn.escape(args)
@gen.coroutine
def execute(self, query, args=None):
'''Execute a query'''
conn = self._get_db()
while (yield self.nextset()):
pass
if PY2: # Use bytes on Python 2 always
encoding = conn.encoding
def ensure_bytes(x):
if isinstance(x, unicode):
x = x.encode(encoding)
return x
query = ensure_bytes(query)
if args is not None:
if isinstance(args, (tuple, list)):
args = tuple(map(ensure_bytes, args))
elif isinstance(args, dict):
args = dict((ensure_bytes(key), ensure_bytes(val)) for (key, val) in args.items())
else:
args = ensure_bytes(args)
if args is not None:
query = query % self._escape_args(args, conn)
yield self._query(query)
self._executed = query
raise gen.Return(self.rowcount)
@gen.coroutine
@gen.coroutine
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
v = v.encode(encoding)
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
v = v.encode(encoding)
if len(sql) + len(v) + 1 > max_stmt_length:
yield self.execute(bytes(sql + postfix))
rows += self.rowcount
sql = bytearray(prefix)
else:
sql += b','
sql += v
yield self.execute(bytes(sql + postfix))
rows += self.rowcount
self.rowcount = rows
@gen.coroutine
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
yield self._query(q)
yield self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
yield self._query(q)
self._executed = q
raise gen.Return(args)
def fetchone(self):
''' Fetch the next row '''
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
''' Fetch several rows '''
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
@gen.coroutine
def _query(self, q):
conn = self._get_db()
self._last_executed = q
yield conn.query(q)
yield self._do_get_result()
@gen.coroutine
def _do_get_result(self):
conn = self._get_db()
self.rownumber = 0
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
if result.warning_count > 0:
yield self._show_warnings(conn)
@gen.coroutine
def _show_warnings(self, conn):
ws = yield conn.show_warnings()
for w in ws:
msg = w[-1]
if PY2 and isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(msg, err.Warning, 4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/cursors.py | SSCursor.read_next | python | def read_next(self):
row = yield self._result._read_rowdata_packet_unbuffered()
row = self._conv_row(row)
raise gen.Return(row) | Read next row | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L402-L406 | [
"def _conv_row(self, row):\n return row\n"
] | class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this, is the client uses much less memory,
and rows are returned much faster when traveling over a slow network,
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
def _conv_row(self, row):
return row
@gen.coroutine
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
yield self._result._finish_unbuffered_query()
try:
while (yield self.nextset()):
pass
finally:
self.connection = None
@gen.coroutine
def _query(self, q):
conn = self._get_db()
self._last_executed = q
yield conn.query(q, unbuffered=True)
yield self._do_get_result()
raise gen.Return(self.rowcount)
@gen.coroutine
def nextset(self):
res = yield self._nextset(True)
raise gen.Return(res)
@gen.coroutine
@gen.coroutine
def fetchone(self):
""" Fetch next row """
self._check_executed()
row = yield self.read_next()
if row is None:
raise gen.Return(None)
self.rownumber += 1
raise gen.Return(row)
@gen.coroutine
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
rows = []
while True:
row = yield self.fetchone()
if row is None:
break
rows.append(row)
raise gen.Return(rows)
@gen.coroutine
def fetchmany(self, size=None):
""" Fetch many """
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = yield self.read_next()
if row is None:
break
rows.append(row)
self.rownumber += 1
raise gen.Return(rows)
@gen.coroutine
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
yield self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
yield self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
|
PyMySQL/Tornado-MySQL | tornado_mysql/cursors.py | SSCursor.fetchone | python | def fetchone(self):
self._check_executed()
row = yield self.read_next()
if row is None:
raise gen.Return(None)
self.rownumber += 1
raise gen.Return(row) | Fetch next row | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L409-L416 | [
"def _check_executed(self):\n if not self._executed:\n raise err.ProgrammingError(\"execute() first\")\n"
] | class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this, is the client uses much less memory,
and rows are returned much faster when traveling over a slow network,
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
def _conv_row(self, row):
return row
@gen.coroutine
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
yield self._result._finish_unbuffered_query()
try:
while (yield self.nextset()):
pass
finally:
self.connection = None
@gen.coroutine
def _query(self, q):
conn = self._get_db()
self._last_executed = q
yield conn.query(q, unbuffered=True)
yield self._do_get_result()
raise gen.Return(self.rowcount)
@gen.coroutine
def nextset(self):
res = yield self._nextset(True)
raise gen.Return(res)
@gen.coroutine
def read_next(self):
""" Read next row """
row = yield self._result._read_rowdata_packet_unbuffered()
row = self._conv_row(row)
raise gen.Return(row)
@gen.coroutine
@gen.coroutine
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
rows = []
while True:
row = yield self.fetchone()
if row is None:
break
rows.append(row)
raise gen.Return(rows)
@gen.coroutine
def fetchmany(self, size=None):
""" Fetch many """
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = yield self.read_next()
if row is None:
break
rows.append(row)
self.rownumber += 1
raise gen.Return(rows)
@gen.coroutine
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
yield self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
yield self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
|
PyMySQL/Tornado-MySQL | tornado_mysql/cursors.py | SSCursor.fetchall | python | def fetchall(self):
rows = []
while True:
row = yield self.fetchone()
if row is None:
break
rows.append(row)
raise gen.Return(rows) | Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/cursors.py#L419-L431 | null | class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this, is the client uses much less memory,
and rows are returned much faster when traveling over a slow network,
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
def _conv_row(self, row):
return row
@gen.coroutine
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
yield self._result._finish_unbuffered_query()
try:
while (yield self.nextset()):
pass
finally:
self.connection = None
@gen.coroutine
def _query(self, q):
conn = self._get_db()
self._last_executed = q
yield conn.query(q, unbuffered=True)
yield self._do_get_result()
raise gen.Return(self.rowcount)
@gen.coroutine
def nextset(self):
res = yield self._nextset(True)
raise gen.Return(res)
@gen.coroutine
def read_next(self):
""" Read next row """
row = yield self._result._read_rowdata_packet_unbuffered()
row = self._conv_row(row)
raise gen.Return(row)
@gen.coroutine
def fetchone(self):
""" Fetch next row """
self._check_executed()
row = yield self.read_next()
if row is None:
raise gen.Return(None)
self.rownumber += 1
raise gen.Return(row)
@gen.coroutine
@gen.coroutine
def fetchmany(self, size=None):
""" Fetch many """
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = yield self.read_next()
if row is None:
break
rows.append(row)
self.rownumber += 1
raise gen.Return(rows)
@gen.coroutine
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
yield self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
yield self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
|
PyMySQL/Tornado-MySQL | tornado_mysql/__init__.py | Binary | python | def Binary(x):
if isinstance(x, text_type) and not (JYTHON or IRONPYTHON):
return x.encode()
return bytes(x) | Return x as a binary type. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/__init__.py#L77-L81 | null | '''
Tornado-MySQL: A pure-Python MySQL client library for Tornado.
Copyright (c) 2010, 2013-2014 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 6, 3, None)
from ._compat import text_type, JYTHON, IRONPYTHON
from .constants import FIELD_TYPE
from .converters import escape_dict, escape_sequence, escape_string
from .err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from .times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
from tornado import gen
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
return super(DBAPISet, self).__ne__(self, other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
@gen.coroutine
def connect(*args, **kwargs):
"""See connections.Connection.__init__() for information about defaults."""
from .connections import Connection
conn = Connection(*args, **kwargs)
yield conn.connect()
raise gen.Return(conn)
from . import connections as _orig_conn
if _orig_conn.Connection.__init__.__doc__ is not None:
connect.__doc__ = _orig_conn.Connection.__init__.__doc__ + ("""
See connections.Connection.__init__() for information about defaults.
""")
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
return '.'.join(map(str, VERSION))
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
__all__ = [
'BINARY', 'Binary', 'connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"NULL","__version__",
]
|
PyMySQL/Tornado-MySQL | tornado_mysql/__init__.py | connect | python | def connect(*args, **kwargs):
from .connections import Connection
conn = Connection(*args, **kwargs)
yield conn.connect()
raise gen.Return(conn) | See connections.Connection.__init__() for information about defaults. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/__init__.py#L84-L89 | null | '''
Tornado-MySQL: A pure-Python MySQL client library for Tornado.
Copyright (c) 2010, 2013-2014 PyMySQL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
VERSION = (0, 6, 3, None)
from ._compat import text_type, JYTHON, IRONPYTHON
from .constants import FIELD_TYPE
from .converters import escape_dict, escape_sequence, escape_string
from .err import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError, MySQLError
from .times import Date, Time, Timestamp, \
DateFromTicks, TimeFromTicks, TimestampFromTicks
import sys
from tornado import gen
threadsafety = 1
apilevel = "2.0"
paramstyle = "format"
class DBAPISet(frozenset):
def __ne__(self, other):
if isinstance(other, set):
return super(DBAPISet, self).__ne__(self, other)
else:
return other not in self
def __eq__(self, other):
if isinstance(other, frozenset):
return frozenset.__eq__(self, other)
else:
return other in self
def __hash__(self):
return frozenset.__hash__(self)
STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING,
FIELD_TYPE.VAR_STRING])
BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB,
FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB])
NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT,
FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG,
FIELD_TYPE.TINY, FIELD_TYPE.YEAR])
DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE])
TIME = DBAPISet([FIELD_TYPE.TIME])
TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME])
DATETIME = TIMESTAMP
ROWID = DBAPISet()
def Binary(x):
"""Return x as a binary type."""
if isinstance(x, text_type) and not (JYTHON or IRONPYTHON):
return x.encode()
return bytes(x)
@gen.coroutine
from . import connections as _orig_conn
if _orig_conn.Connection.__init__.__doc__ is not None:
connect.__doc__ = _orig_conn.Connection.__init__.__doc__ + ("""
See connections.Connection.__init__() for information about defaults.
""")
del _orig_conn
def get_client_info(): # for MySQLdb compatibility
return '.'.join(map(str, VERSION))
# we include a doctored version_info here for MySQLdb compatibility
version_info = (1,2,2,"final",0)
NULL = "NULL"
__version__ = get_client_info()
__all__ = [
'BINARY', 'Binary', 'connect', 'Connection', 'DATE', 'Date',
'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError',
'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER',
'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError',
'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel',
'connections', 'constants', 'converters', 'cursors',
'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info',
'paramstyle', 'threadsafety', 'version_info',
"NULL","__version__",
]
|
PyMySQL/Tornado-MySQL | tornado_mysql/converters.py | convert_datetime | python | def convert_datetime(obj):
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj) | Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L98-L129 | [
"def convert_date(obj):\n \"\"\"Returns a DATE column as a date object:\n\n >>> date_or_None('2007-02-26')\n datetime.date(2007, 2, 26)\n\n Illegal values are returned as None:\n\n >>> date_or_None('2007-02-31') is None\n True\n >>> date_or_None('0000-00-00') is None\n True\n\n ... | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import sys
import binascii
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
encoder = encoders[type(val)]
val = encoder(val)
return val
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
n = []
for item in val:
quoted = escape_item(item, charset)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return str(value)
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value):
return "'%s'" % escape_string(value)
def escape_unicode(value):
return escape_str(value)
def escape_bytes(value):
return "x'%s'" % binascii.hexlify(value).decode(sys.getdefaultencoding())
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
return escape_str('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
int(obj.second))
if obj.microsecond:
s += ".{0:06}".format(obj.microsecond)
return escape_str(s)
def escape_datetime(obj):
return escape_str(obj.isoformat(' '))
def escape_date(obj):
return escape_str(obj.isoformat())
def escape_struct_time(obj):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
negate = 1
if hours.startswith("-"):
hours = hours[1:]
negate = -1
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: str,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
def Thing2Literal(obj):
return escape_str(str(obj))
|
PyMySQL/Tornado-MySQL | tornado_mysql/converters.py | convert_timedelta | python | def convert_timedelta(obj):
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
negate = 1
if hours.startswith("-"):
hours = hours[1:]
negate = -1
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None | Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L132-L167 | null | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import sys
import binascii
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
encoder = encoders[type(val)]
val = encoder(val)
return val
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
n = []
for item in val:
quoted = escape_item(item, charset)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return str(value)
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value):
return "'%s'" % escape_string(value)
def escape_unicode(value):
return escape_str(value)
def escape_bytes(value):
return "x'%s'" % binascii.hexlify(value).decode(sys.getdefaultencoding())
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
return escape_str('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
int(obj.second))
if obj.microsecond:
s += ".{0:06}".format(obj.microsecond)
return escape_str(s)
def escape_datetime(obj):
return escape_str(obj.isoformat(' '))
def escape_date(obj):
return escape_str(obj.isoformat())
def escape_struct_time(obj):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj)
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: str,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
def Thing2Literal(obj):
return escape_str(str(obj))
|
PyMySQL/Tornado-MySQL | tornado_mysql/converters.py | convert_time | python | def convert_time(obj):
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None | Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L169-L200 | null | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import sys
import binascii
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
encoder = encoders[type(val)]
val = encoder(val)
return val
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
n = []
for item in val:
quoted = escape_item(item, charset)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return str(value)
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value):
return "'%s'" % escape_string(value)
def escape_unicode(value):
return escape_str(value)
def escape_bytes(value):
return "x'%s'" % binascii.hexlify(value).decode(sys.getdefaultencoding())
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
return escape_str('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
int(obj.second))
if obj.microsecond:
s += ".{0:06}".format(obj.microsecond)
return escape_str(s)
def escape_datetime(obj):
return escape_str(obj.isoformat(' '))
def escape_date(obj):
return escape_str(obj.isoformat())
def escape_struct_time(obj):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj)
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
negate = 1
if hours.startswith("-"):
hours = hours[1:]
negate = -1
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
def convert_date(obj):
"""Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True
"""
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: str,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
def Thing2Literal(obj):
return escape_str(str(obj))
|
PyMySQL/Tornado-MySQL | tornado_mysql/converters.py | convert_date | python | def convert_date(obj):
try:
return datetime.date(*[ int(x) for x in obj.split('-', 2) ])
except ValueError:
return None | Returns a DATE column as a date object:
>>> date_or_None('2007-02-26')
datetime.date(2007, 2, 26)
Illegal values are returned as None:
>>> date_or_None('2007-02-31') is None
True
>>> date_or_None('0000-00-00') is None
True | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L202-L219 | null | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import sys
import binascii
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
encoder = encoders[type(val)]
val = encoder(val)
return val
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
n = []
for item in val:
quoted = escape_item(item, charset)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return str(value)
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value):
return "'%s'" % escape_string(value)
def escape_unicode(value):
return escape_str(value)
def escape_bytes(value):
return "x'%s'" % binascii.hexlify(value).decode(sys.getdefaultencoding())
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
seconds = int(obj.seconds) % 60
minutes = int(obj.seconds // 60) % 60
hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
return escape_str('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute),
int(obj.second))
if obj.microsecond:
s += ".{0:06}".format(obj.microsecond)
return escape_str(s)
def escape_datetime(obj):
return escape_str(obj.isoformat(' '))
def escape_date(obj):
return escape_str(obj.isoformat())
def escape_struct_time(obj):
return escape_datetime(datetime.datetime(*obj[:6]))
def convert_datetime(obj):
"""Returns a DATETIME or TIMESTAMP column value as a datetime object:
>>> datetime_or_None('2007-02-25 23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
>>> datetime_or_None('2007-02-25T23:06:20')
datetime.datetime(2007, 2, 25, 23, 6, 20)
Illegal values are returned as None:
>>> datetime_or_None('2007-02-31T23:06:20') is None
True
>>> datetime_or_None('0000-00-00 00:00:00') is None
True
"""
if ' ' in obj:
sep = ' '
elif 'T' in obj:
sep = 'T'
else:
return convert_date(obj)
try:
ymd, hms = obj.split(sep, 1)
usecs = '0'
if '.' in hms:
hms, usecs = hms.split('.')
usecs = float('0.' + usecs) * 1e6
return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
except ValueError:
return convert_date(obj)
def convert_timedelta(obj):
"""Returns a TIME column as a timedelta object:
>>> timedelta_or_None('25:06:17')
datetime.timedelta(1, 3977)
>>> timedelta_or_None('-25:06:17')
datetime.timedelta(-2, 83177)
Illegal values are returned as None:
>>> timedelta_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
negate = 1
if hours.startswith("-"):
hours = hours[1:]
negate = -1
tdelta = datetime.timedelta(
hours = int(hours),
minutes = int(minutes),
seconds = int(seconds),
microseconds = int(microseconds)
) * negate
return tdelta
except ValueError:
return None
def convert_time(obj):
"""Returns a TIME column as a time object:
>>> time_or_None('15:06:17')
datetime.time(15, 6, 17)
Illegal values are returned as None:
>>> time_or_None('-25:06:17') is None
True
>>> time_or_None('random crap') is None
True
Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
can accept values as (+|-)DD HH:MM:SS. The latter format will not
be parsed correctly by this function.
Also note that MySQL's TIME column corresponds more closely to
Python's timedelta and not time. However if you want TIME columns
to be treated as time-of-day and not a time offset, then you can
use set this function as the converter for FIELD_TYPE.TIME.
"""
try:
microseconds = 0
if "." in obj:
(obj, tail) = obj.split('.')
microseconds = float('0.' + tail) * 1e6
hours, minutes, seconds = obj.split(':')
return datetime.time(hour=int(hours), minute=int(minutes),
second=int(seconds), microsecond=int(microseconds))
except ValueError:
return None
def convert_mysql_timestamp(timestamp):
"""Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True
"""
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None
def convert_set(s):
return set(s.split(","))
def through(x):
return x
#def convert_bit(b):
# b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
# return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
field_charset = charset_by_id(field.charsetnr).name
encoding = charset_to_encoding(field_charset)
if field.flags & FLAG.SET:
return convert_set(data.decode(encoding))
if field.flags & FLAG.BINARY:
return data
if connection.use_unicode:
data = data.decode(encoding)
elif connection.charset != field_charset:
data = data.decode(encoding)
data = data.encode(connection.encoding)
return data
encoders = {
bool: escape_bool,
int: escape_int,
long_type: escape_int,
float: escape_float,
str: escape_str,
text_type: escape_unicode,
tuple: escape_sequence,
list: escape_sequence,
set: escape_sequence,
dict: escape_dict,
type(None): escape_None,
datetime.date: escape_date,
datetime.datetime: escape_datetime,
datetime.timedelta: escape_timedelta,
datetime.time: escape_time,
time.struct_time: escape_struct_time,
Decimal: str,
}
if not PY2 or JYTHON or IRONPYTHON:
encoders[bytes] = escape_bytes
decoders = {
FIELD_TYPE.BIT: convert_bit,
FIELD_TYPE.TINY: int,
FIELD_TYPE.SHORT: int,
FIELD_TYPE.LONG: int,
FIELD_TYPE.FLOAT: float,
FIELD_TYPE.DOUBLE: float,
FIELD_TYPE.LONGLONG: int,
FIELD_TYPE.INT24: int,
FIELD_TYPE.YEAR: int,
FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
FIELD_TYPE.DATETIME: convert_datetime,
FIELD_TYPE.TIME: convert_timedelta,
FIELD_TYPE.DATE: convert_date,
FIELD_TYPE.SET: convert_set,
FIELD_TYPE.BLOB: through,
FIELD_TYPE.TINY_BLOB: through,
FIELD_TYPE.MEDIUM_BLOB: through,
FIELD_TYPE.LONG_BLOB: through,
FIELD_TYPE.STRING: through,
FIELD_TYPE.VAR_STRING: through,
FIELD_TYPE.VARCHAR: through,
FIELD_TYPE.DECIMAL: Decimal,
FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
def Thing2Literal(obj):
return escape_str(str(obj))
|
PyMySQL/Tornado-MySQL | tornado_mysql/converters.py | convert_mysql_timestamp | python | def convert_mysql_timestamp(timestamp):
if timestamp[4] == '-':
return convert_datetime(timestamp)
timestamp += "0"*(14-len(timestamp)) # padding
year, month, day, hour, minute, second = \
int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \
int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14])
try:
return datetime.datetime(year, month, day, hour, minute, second)
except ValueError:
return None | Convert a MySQL TIMESTAMP to a Timestamp object.
MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME:
>>> mysql_timestamp_converter('2007-02-25 22:32:17')
datetime.datetime(2007, 2, 25, 22, 32, 17)
MySQL < 4.1 uses a big string of numbers:
>>> mysql_timestamp_converter('20070225223217')
datetime.datetime(2007, 2, 25, 22, 32, 17)
Illegal values are returned as None:
>>> mysql_timestamp_converter('2007-02-31 22:32:17') is None
True
>>> mysql_timestamp_converter('00000000000000') is None
True | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L222-L252 | [
"def convert_datetime(obj):\n \"\"\"Returns a DATETIME or TIMESTAMP column value as a datetime object:\n\n >>> datetime_or_None('2007-02-25 23:06:20')\n datetime.datetime(2007, 2, 25, 23, 6, 20)\n >>> datetime_or_None('2007-02-25T23:06:20')\n datetime.datetime(2007, 2, 25, 23, 6, 20)\n\n I... | from ._compat import PY2, text_type, long_type, JYTHON, IRONPYTHON
import sys
import binascii
import datetime
from decimal import Decimal
import re
import time
from .constants import FIELD_TYPE, FLAG
from .charset import charset_by_id, charset_to_encoding
ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]")
ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z',
'\'': '\\\'', '"': '\\"', '\\': '\\\\'}
def escape_item(val, charset):
if type(val) in [tuple, list, set]:
return escape_sequence(val, charset)
if type(val) is dict:
return escape_dict(val, charset)
encoder = encoders[type(val)]
val = encoder(val)
return val
def escape_dict(val, charset):
n = {}
for k, v in val.items():
quoted = escape_item(v, charset)
n[k] = quoted
return n
def escape_sequence(val, charset):
n = []
for item in val:
quoted = escape_item(item, charset)
n.append(quoted)
return "(" + ",".join(n) + ")"
def escape_set(val, charset):
val = map(lambda x: escape_item(x, charset), val)
return ','.join(val)
def escape_bool(value):
return str(int(value))
def escape_object(value):
return str(value)
def escape_int(value):
return str(value)
def escape_float(value):
return ('%.15g' % value)
def escape_string(value):
return ("%s" % (ESCAPE_REGEX.sub(
lambda match: ESCAPE_MAP.get(match.group(0)), value),))
def escape_str(value):
return "'%s'" % escape_string(value)
def escape_unicode(value):
return escape_str(value)
def escape_bytes(value):
return "x'%s'" % binascii.hexlify(value).decode(sys.getdefaultencoding())
def escape_None(value):
return 'NULL'
def escape_timedelta(obj):
    """Render a timedelta as a quoted MySQL TIME literal.

    Fix: fractional seconds were previously dropped silently; they are
    now emitted as a 6-digit suffix (MySQL TIME(6) syntax). Values with
    no microseconds render exactly as before.
    """
    seconds = int(obj.seconds) % 60
    minutes = int(obj.seconds // 60) % 60
    hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24
    if obj.microseconds:
        return escape_str('%02d:%02d:%02d.%06d' % (
            hours, minutes, seconds, obj.microseconds))
    return escape_str('%02d:%02d:%02d' % (hours, minutes, seconds))
def escape_time(obj):
    """Render a datetime.time as a quoted TIME literal, with microseconds."""
    text = '%02d:%02d:%02d' % (obj.hour, obj.minute, obj.second)
    if obj.microsecond:
        text += '.{0:06}'.format(obj.microsecond)
    return escape_str(text)
def escape_datetime(obj):
    """Render a datetime as a quoted 'YYYY-MM-DD HH:MM:SS[.ffffff]' literal."""
    formatted = obj.isoformat(' ')
    return escape_str(formatted)
def escape_date(obj):
    """Render a date as a quoted ISO 'YYYY-MM-DD' literal."""
    return escape_str('%04d-%02d-%02d' % (obj.year, obj.month, obj.day))
def escape_struct_time(obj):
    """Convert a time.struct_time's first six fields into a DATETIME literal."""
    year, month, day, hour, minute, second = obj[:6]
    return escape_datetime(datetime.datetime(year, month, day, hour, minute, second))
def convert_datetime(obj):
    """Returns a DATETIME or TIMESTAMP column value as a datetime object:

    >>> convert_datetime('2007-02-25 23:06:20')
    datetime.datetime(2007, 2, 25, 23, 6, 20)
    >>> convert_datetime('2007-02-25T23:06:20')
    datetime.datetime(2007, 2, 25, 23, 6, 20)

    Illegal values are returned as None:

    >>> convert_datetime('2007-02-31T23:06:20') is None
    True
    >>> convert_datetime('0000-00-00 00:00:00') is None
    True
    """
    # MySQL uses ' ' as the date/time separator; ISO-8601 uses 'T'.
    if ' ' in obj:
        sep = ' '
    elif 'T' in obj:
        sep = 'T'
    else:
        # No time component at all: parse as a bare DATE.
        return convert_date(obj)
    try:
        ymd, hms = obj.split(sep, 1)
        usecs = '0'
        if '.' in hms:
            # Fractional seconds: scale the '.nnn' suffix to microseconds.
            hms, usecs = hms.split('.')
            usecs = float('0.' + usecs) * 1e6
        return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':')+[usecs] ])
    except ValueError:
        # Out-of-range fields (e.g. month 00) fall back to DATE parsing,
        # which itself returns None on failure.
        return convert_date(obj)
def convert_timedelta(obj):
    """Returns a TIME column as a timedelta object:

    >>> convert_timedelta('25:06:17')
    datetime.timedelta(1, 3977)
    >>> convert_timedelta('-25:06:17')
    datetime.timedelta(-2, 82423)

    Illegal values are returned as None:

    >>> convert_timedelta('random crap') is None
    True

    Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
    can accept values as (+|-)DD HH:MM:SS. The latter format will not
    be parsed correctly by this function.
    """
    try:
        microseconds = 0
        if "." in obj:
            # Fractional-seconds suffix, scaled to microseconds.
            (obj, tail) = obj.split('.')
            microseconds = float('0.' + tail) * 1e6
        hours, minutes, seconds = obj.split(':')
        # A leading '-' negates the whole value, not just the hour field.
        negate = 1
        if hours.startswith("-"):
            hours = hours[1:]
            negate = -1
        tdelta = datetime.timedelta(
            hours = int(hours),
            minutes = int(minutes),
            seconds = int(seconds),
            microseconds = int(microseconds)
        ) * negate
        return tdelta
    except ValueError:
        return None
def convert_time(obj):
    """Returns a TIME column as a time object:

    >>> convert_time('15:06:17')
    datetime.time(15, 6, 17)

    Illegal values are returned as None:

    >>> convert_time('-25:06:17') is None
    True
    >>> convert_time('random crap') is None
    True

    Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but
    can accept values as (+|-)DD HH:MM:SS. The latter format will not
    be parsed correctly by this function.

    Also note that MySQL's TIME column corresponds more closely to
    Python's timedelta and not time. However if you want TIME columns
    to be treated as time-of-day and not a time offset, then you can
    use set this function as the converter for FIELD_TYPE.TIME.
    """
    try:
        microseconds = 0
        if "." in obj:
            # Fractional-seconds suffix, scaled to microseconds.
            (obj, tail) = obj.split('.')
            microseconds = float('0.' + tail) * 1e6
        hours, minutes, seconds = obj.split(':')
        # datetime.time rejects negative or >23 hours, yielding None below.
        return datetime.time(hour=int(hours), minute=int(minutes),
                             second=int(seconds), microsecond=int(microseconds))
    except ValueError:
        return None
def convert_date(obj):
    """Returns a DATE column as a date object:

    >>> convert_date('2007-02-26')
    datetime.date(2007, 2, 26)

    Illegal values are returned as None:

    >>> convert_date('2007-02-31') is None
    True
    >>> convert_date('0000-00-00') is None
    True
    """
    try:
        parts = [int(piece) for piece in obj.split('-', 2)]
        return datetime.date(*parts)
    except ValueError:
        return None
def convert_set(s):
    """Split a SET column value on commas into a Python set."""
    return {member for member in s.split(",")}
def through(x):
    """Identity decoder: hand the raw value back unchanged."""
    return x
#def convert_bit(b):
#    b = "\x00" * (8 - len(b)) + b # pad w/ zeroes
#    return struct.unpack(">Q", b)[0]
#
# the snippet above is right, but MySQLdb doesn't process bits,
# so we shouldn't either
convert_bit = through
def convert_characters(connection, field, data):
    """Decode a character-typed column's raw bytes per *field*'s charset.

    SET columns become Python sets; BINARY-flagged columns are returned
    as raw bytes untouched.
    """
    field_charset = charset_by_id(field.charsetnr).name
    encoding = charset_to_encoding(field_charset)
    if field.flags & FLAG.SET:
        return convert_set(data.decode(encoding))
    if field.flags & FLAG.BINARY:
        return data
    if connection.use_unicode:
        # Caller wants text objects: decode and stop there.
        data = data.decode(encoding)
    elif connection.charset != field_charset:
        # Transcode from the column's charset to the connection's charset.
        data = data.decode(encoding)
        data = data.encode(connection.encoding)
    return data
# Maps a Python type to the escape function used when a value of that
# exact type is interpolated into SQL (dispatched from escape_item).
encoders = {
    bool: escape_bool,
    int: escape_int,
    long_type: escape_int,
    float: escape_float,
    str: escape_str,
    text_type: escape_unicode,
    tuple: escape_sequence,
    list: escape_sequence,
    set: escape_sequence,
    dict: escape_dict,
    type(None): escape_None,
    datetime.date: escape_date,
    datetime.datetime: escape_datetime,
    datetime.timedelta: escape_timedelta,
    datetime.time: escape_time,
    time.struct_time: escape_struct_time,
    Decimal: str,
}
# On Python 3 (and on Jython/IronPython even under 2.x) bytes is a
# distinct type and gets the hex-literal encoder; on CPython 2 bytes
# is str and is already handled above.
if not PY2 or JYTHON or IRONPYTHON:
    encoders[bytes] = escape_bytes
# Maps MySQL wire field-type codes to the converter applied to each
# column value when rows are decoded.
decoders = {
    FIELD_TYPE.BIT: convert_bit,
    FIELD_TYPE.TINY: int,
    FIELD_TYPE.SHORT: int,
    FIELD_TYPE.LONG: int,
    FIELD_TYPE.FLOAT: float,
    FIELD_TYPE.DOUBLE: float,
    FIELD_TYPE.LONGLONG: int,
    FIELD_TYPE.INT24: int,
    FIELD_TYPE.YEAR: int,
    # NOTE(review): convert_mysql_timestamp is not defined in this chunk
    # of the file — confirm it exists at module level.
    FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp,
    FIELD_TYPE.DATETIME: convert_datetime,
    FIELD_TYPE.TIME: convert_timedelta,
    FIELD_TYPE.DATE: convert_date,
    FIELD_TYPE.SET: convert_set,
    FIELD_TYPE.BLOB: through,
    FIELD_TYPE.TINY_BLOB: through,
    FIELD_TYPE.MEDIUM_BLOB: through,
    FIELD_TYPE.LONG_BLOB: through,
    FIELD_TYPE.STRING: through,
    FIELD_TYPE.VAR_STRING: through,
    FIELD_TYPE.VARCHAR: through,
    FIELD_TYPE.DECIMAL: Decimal,
    FIELD_TYPE.NEWDECIMAL: Decimal,
}
# for MySQLdb compatibility
conversions = decoders
def Thing2Literal(obj):
    """MySQLdb-compatibility shim: stringify *obj* and quote-escape it."""
    return escape_str(str(obj))
|
def close(self):
    """Close the socket without sending quit message."""
    stream = self._stream
    if stream is None:
        return
    self._stream = None
    stream.close()
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
    @gen.coroutine
    def close_async(self):
        """Send the quit message and close the socket"""
        if self._stream is None or self._stream.closed():
            self._stream = None
            return
        # COM_QUIT frame: length 1 + the command byte; the server hangs up
        # after receiving it.
        send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
        yield self._stream.write(send_data)
        # NOTE(review): close() is not defined on this class in this file
        # chunk — confirm it exists (it should drop the stream reference).
        self.close()
    @property
    def open(self):
        # True while a stream object is held; does not probe liveness.
        return self._stream is not None
    def __del__(self):
        # Best-effort cleanup on garbage collection.
        # NOTE(review): close() is not visible in this chunk — confirm.
        self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
@gen.coroutine
def begin(self):
"""Begin transaction."""
yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
yield self._read_ok_packet()
@gen.coroutine
def commit(self):
''' Commit changes to stable storage '''
yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
yield self._read_ok_packet()
@gen.coroutine
def rollback(self):
''' Roll back the current transaction '''
yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
yield self._read_ok_packet()
@gen.coroutine
def show_warnings(self):
"""SHOW WARNINGS"""
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows)
@gen.coroutine
def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet()
def escape(self, obj):
''' Escape whatever value you pass to it '''
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset)
    def literal(self, obj):
        '''Alias for escape()'''
        # Kept for MySQLdb API compatibility.
        return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
@gen.coroutine
def query(self, sql, unbuffered=False):
if DEBUG:
print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
sql = sql.encode(self.encoding)
yield self._execute_command(COMMAND.COM_QUERY, sql)
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
@gen.coroutine
def next_result(self, unbuffered=False):
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
    def affected_rows(self):
        # Row count from the most recently completed query.
        return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
    @gen.coroutine
    def ping(self, reconnect=True):
        """Check if the server is alive"""
        # No live stream: reconnect once if allowed, else report closed.
        if self._stream is None:
            if reconnect:
                yield self.connect()
                reconnect = False
            else:
                raise Error("Already closed")
        try:
            yield self._execute_command(COMMAND.COM_PING, "")
            yield self._read_ok_packet()
        except Exception:
            # One retry after a fresh connect; the recursive call passes
            # reconnect=False so a second failure propagates.
            if reconnect:
                yield self.connect()
                yield self.ping(False)
            else:
                raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
@gen.coroutine
def connect(self):
#TODO: Set close callback
#raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
sock = None
try:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self.unix_socket
self.host_info = "Localhost via UNIX socket: " + self.unix_socket
else:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
addr = (self.host, self.port)
self.host_info = "socket %s:%d" % (self.host, self.port)
stream = iostream.IOStream(sock)
# TODO: handle connect_timeout
yield stream.connect(addr)
self._stream = stream
if self.no_delay:
stream.set_nodelay(True)
yield self._get_server_information()
yield self._request_authentication()
self.connected_time = self.io_loop.time()
if self.sql_mode is not None:
yield self.query("SET sql_mode=%s" % (self.sql_mode,))
if self.init_command is not None:
yield self.query(self.init_command)
yield self.commit()
if self.autocommit_mode is not None:
yield self.autocommit(self.autocommit_mode)
except BaseException as e:
if sock is not None:
try:
sock.close()
except socket.error:
pass
self._stream = None
if isinstance(e, err.MySQLError):
raise
raise OperationalError(
2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
    @gen.coroutine
    def _read_packet(self, packet_type=MysqlPacket):
        """Read an entire "mysql packet" in its entirety from the network
        and return a MysqlPacket type that represents the results.

        Large payloads span multiple wire frames; frames are concatenated
        until one shorter than MAX_PACKET_LEN marks the end.
        """
        buff = b''
        try:
            while True:
                # 4-byte frame header: 3-byte little-endian payload length
                # plus a 1-byte sequence number.
                packet_header = yield self._stream.read_bytes(4)
                if DEBUG: dump_packet(packet_header)
                btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
                bytes_to_read = btrl + (btrh << 16)
                #TODO: check sequence id
                recv_data = yield self._stream.read_bytes(bytes_to_read)
                if DEBUG: dump_packet(recv_data)
                buff += recv_data
                # A payload shorter than the maximum marks the final frame.
                if bytes_to_read < MAX_PACKET_LEN:
                    break
        except iostream.StreamClosedError as e:
            raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
        packet = packet_type(buff, self.encoding)
        packet.check_error()
        raise gen.Return(packet)
    def _write_bytes(self, data):
        # Thin wrapper; returns the IOStream write future for yielding.
        return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
    @gen.coroutine
    def _execute_command(self, command, sql):
        """Frame *command* + *sql* as wire packets and write them out.

        Payloads larger than MAX_PACKET_LEN are split into continuation
        frames with incrementing sequence ids.
        """
        if not self._stream:
            raise InterfaceError("(0, 'Not connected')")
        # If the last query was unbuffered, make sure it finishes before
        # sending new commands
        if self._result is not None and self._result.unbuffered_active:
            warnings.warn("Previous unbuffered result was left incomplete")
            yield self._result._finish_unbuffered_query()
        if isinstance(sql, text_type):
            sql = sql.encode(self.encoding)
        chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
        # First frame: 3-byte length + sequence 0 + command byte + payload.
        prelude = struct.pack('<iB', chunk_size, command)
        self._write_bytes(prelude + sql[:chunk_size-1])
        if DEBUG: dump_packet(prelude + sql)
        if chunk_size < MAX_PACKET_LEN:
            return
        # Continuation frames: 3-byte length + 1-byte sequence id.
        seq_id = 1
        sql = sql[chunk_size-1:]
        while True:
            chunk_size = min(MAX_PACKET_LEN, len(sql))
            prelude = struct.pack('<i', chunk_size)[:3]
            data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
            self._write_bytes(data)
            if DEBUG: dump_packet(data)
            sql = sql[chunk_size:]
            if not sql and chunk_size < MAX_PACKET_LEN:
                break
            seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
    # _mysql support
    def thread_id(self):
        """Server-side connection (thread) id from the handshake."""
        return self.server_thread_id[0]
    def character_set_name(self):
        """Name of the connection character set."""
        return self.charset
    def get_host_info(self):
        """Human-readable description of the server endpoint."""
        return self.host_info
    def get_proto_info(self):
        """Protocol version byte reported in the server handshake."""
        return self.protocol_version
@gen.coroutine
def _get_server_information(self):
i = 0
packet = yield self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
# TODO: AUTH PLUGIN NAME may appeare here.
    def get_server_info(self):
        # Server version string from the handshake, e.g. "5.6.23".
        return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
def close_async(self):
    """Send the quit message and close the socket."""
    if self._stream is None or self._stream.closed():
        self._stream = None
        return
    send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
    yield self._stream.write(send_data)
    self.close()


class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
@gen.coroutine
@property
def open(self):
return self._stream is not None
def __del__(self):
self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
@gen.coroutine
def begin(self):
"""Begin transaction."""
yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
yield self._read_ok_packet()
@gen.coroutine
def commit(self):
''' Commit changes to stable storage '''
yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
yield self._read_ok_packet()
@gen.coroutine
def rollback(self):
''' Roll back the current transaction '''
yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
yield self._read_ok_packet()
@gen.coroutine
def show_warnings(self):
"""SHOW WARNINGS"""
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows)
@gen.coroutine
def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet()
def escape(self, obj):
''' Escape whatever value you pass to it '''
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset)
def literal(self, obj):
'''Alias for escape()'''
return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
@gen.coroutine
def query(self, sql, unbuffered=False):
if DEBUG:
print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
sql = sql.encode(self.encoding)
yield self._execute_command(COMMAND.COM_QUERY, sql)
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
@gen.coroutine
def next_result(self, unbuffered=False):
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
def affected_rows(self):
return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
@gen.coroutine
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self._stream is None:
if reconnect:
yield self.connect()
reconnect = False
else:
raise Error("Already closed")
try:
yield self._execute_command(COMMAND.COM_PING, "")
yield self._read_ok_packet()
except Exception:
if reconnect:
yield self.connect()
yield self.ping(False)
else:
raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
@gen.coroutine
def connect(self):
#TODO: Set close callback
#raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
sock = None
try:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self.unix_socket
self.host_info = "Localhost via UNIX socket: " + self.unix_socket
else:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
addr = (self.host, self.port)
self.host_info = "socket %s:%d" % (self.host, self.port)
stream = iostream.IOStream(sock)
# TODO: handle connect_timeout
yield stream.connect(addr)
self._stream = stream
if self.no_delay:
stream.set_nodelay(True)
yield self._get_server_information()
yield self._request_authentication()
self.connected_time = self.io_loop.time()
if self.sql_mode is not None:
yield self.query("SET sql_mode=%s" % (self.sql_mode,))
if self.init_command is not None:
yield self.query(self.init_command)
yield self.commit()
if self.autocommit_mode is not None:
yield self.autocommit(self.autocommit_mode)
except BaseException as e:
if sock is not None:
try:
sock.close()
except socket.error:
pass
self._stream = None
if isinstance(e, err.MySQLError):
raise
raise OperationalError(
2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
@gen.coroutine
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
try:
while True:
packet_header = yield self._stream.read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
#TODO: check sequence id
recv_data = yield self._stream.read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
if bytes_to_read < MAX_PACKET_LEN:
break
except iostream.StreamClosedError as e:
raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
packet = packet_type(buff, self.encoding)
packet.check_error()
raise gen.Return(packet)
def _write_bytes(self, data):
return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
    @gen.coroutine
    def _execute_command(self, command, sql):
        """Send *command* with payload *sql*, splitting into wire packets.

        :param command: COMMAND.* byte identifying the command.
        :param sql: payload; str is encoded with the connection charset.
        :raises InterfaceError: if the connection is not open.
        """
        if not self._stream:
            raise InterfaceError("(0, 'Not connected')")
        # If the last query was unbuffered, make sure it finishes before
        # sending new commands
        if self._result is not None and self._result.unbuffered_active:
            warnings.warn("Previous unbuffered result was left incomplete")
            yield self._result._finish_unbuffered_query()
        if isinstance(sql, text_type):
            sql = sql.encode(self.encoding)
        chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
        # '<iB' packs a 4-byte length plus the command byte; chunk_size is
        # < 2**24, so the length's high byte is 0, which doubles as wire
        # sequence number 0.
        prelude = struct.pack('<iB', chunk_size, command)
        self._write_bytes(prelude + sql[:chunk_size-1])
        if DEBUG: dump_packet(prelude + sql)
        if chunk_size < MAX_PACKET_LEN:
            return
        # Payload filled a maximal packet: keep sending continuation
        # packets with incrementing sequence numbers (mod 256).
        seq_id = 1
        sql = sql[chunk_size-1:]
        while True:
            chunk_size = min(MAX_PACKET_LEN, len(sql))
            prelude = struct.pack('<i', chunk_size)[:3]
            data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
            self._write_bytes(data)
            if DEBUG: dump_packet(data)
            sql = sql[chunk_size:]
            if not sql and chunk_size < MAX_PACKET_LEN:
                break
            seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
    # _mysql support
    def thread_id(self):
        # Connection id assigned by the server (first element of the
        # '<I' tuple unpacked in _get_server_information).
        return self.server_thread_id[0]
    def character_set_name(self):
        # Name of the client character set currently in use.
        return self.charset
    def get_host_info(self):
        # Human-readable description of the transport (set in connect()).
        return self.host_info
    def get_proto_info(self):
        # MySQL protocol version byte from the server greeting.
        return self.protocol_version
    @gen.coroutine
    def _get_server_information(self):
        """Parse the server's initial handshake (greeting) packet.

        Fills in protocol_version, server_version, server_thread_id,
        salt and server_capabilities, plus language/charset/status
        flags and the salt tail when the packet is long enough.
        """
        i = 0
        packet = yield self._read_packet()
        data = packet.get_all_data()
        if DEBUG: dump_packet(data)
        self.protocol_version = byte2int(data[i:i+1])
        i += 1
        # NUL-terminated human-readable server version string.
        server_end = data.find(int2byte(0), i)
        self.server_version = data[i:server_end].decode('latin1')
        i = server_end + 1
        self.server_thread_id = struct.unpack('<I', data[i:i+4])
        i += 4
        # First 8 bytes of the auth scramble ("salt").
        self.salt = data[i:i+8]
        i += 9 # 8 + 1(filler)
        self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
        i += 2
        if len(data) >= i + 6:
            lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
            i += 6
            self.server_language = lang
            self.server_charset = charset_by_id(lang).name
            self.server_status = stat
            if DEBUG: print("server_status: %x" % stat)
            # Upper 16 bits of the capability flags.
            self.server_capabilities |= cap_h << 16
            if DEBUG: print("salt_len:", salt_len)
            salt_len = max(12, salt_len - 9)
        # reserved
        i += 10
        if len(data) >= i + salt_len:
            # salt_len includes auth_plugin_data_part_1 and filler
            self.salt += data[i:i+salt_len]
        # TODO: AUTH PLUGIN NAME may appeare here.
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/connections.py | Connection.show_warnings | python | def show_warnings(self):
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows) | SHOW WARNINGS | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/connections.py#L660-L665 | null | class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
@gen.coroutine
def close_async(self):
"""Send the quit message and close the socket"""
if self._stream is None or self._stream.closed():
self._stream = None
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
yield self._stream.write(send_data)
self.close()
    @property
    def open(self):
        # True while a live IOStream is attached to this connection.
        return self._stream is not None
    def __del__(self):
        # Best-effort cleanup on GC: drop the socket without COM_QUIT.
        self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
    @gen.coroutine
    def begin(self):
        """Begin transaction."""
        # Transaction control is a plain COM_QUERY followed by consuming
        # the server's OK packet.
        yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
        yield self._read_ok_packet()
    @gen.coroutine
    def commit(self):
        ''' Commit changes to stable storage '''
        yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
        yield self._read_ok_packet()
    @gen.coroutine
    def rollback(self):
        ''' Roll back the current transaction '''
        yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
        yield self._read_ok_packet()
@gen.coroutine
@gen.coroutine
def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet()
    def escape(self, obj):
        ''' Escape whatever value you pass to it '''
        # Strings are quoted and escaped here; all other types are
        # delegated to escape_item with the connection charset.
        if isinstance(obj, str_type):
            return "'" + self.escape_string(obj) + "'"
        return escape_item(obj, self.charset)
    def literal(self, obj):
        '''Alias for escape()'''
        return self.escape(obj)
    def escape_string(self, s):
        # In NO_BACKSLASH_ESCAPES mode only single quotes need doubling;
        # otherwise use the module-level escape_string.
        if (self.server_status &
                SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
            return s.replace("'", "''")
        return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
    # The following methods are INTERNAL USE ONLY (called from Cursor)
    @gen.coroutine
    def query(self, sql, unbuffered=False):
        # Execute *sql* via COM_QUERY and read the response; returns the
        # affected row count through gen.Return.
        if DEBUG:
            print("DEBUG: sending query:", sql)
        # Jython/IronPython take the text path natively; elsewhere encode
        # to the connection charset first.
        if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
            sql = sql.encode(self.encoding)
        yield self._execute_command(COMMAND.COM_QUERY, sql)
        yield self._read_query_result(unbuffered=unbuffered)
        raise gen.Return(self._affected_rows)
    @gen.coroutine
    def next_result(self, unbuffered=False):
        # Advance to the next result set of a multi-statement query and
        # return its affected row count (via gen.Return).
        yield self._read_query_result(unbuffered=unbuffered)
        raise gen.Return(self._affected_rows)
def affected_rows(self):
return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
@gen.coroutine
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self._stream is None:
if reconnect:
yield self.connect()
reconnect = False
else:
raise Error("Already closed")
try:
yield self._execute_command(COMMAND.COM_PING, "")
yield self._read_ok_packet()
except Exception:
if reconnect:
yield self.connect()
yield self.ping(False)
else:
raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
    @gen.coroutine
    def connect(self):
        """Open the transport and run the full connect sequence.

        Chooses a UNIX or TCP socket, reads the server greeting,
        authenticates, then applies sql_mode / init_command /
        autocommit settings.  On failure the socket is closed; a
        MySQLError is re-raised as-is, anything else is wrapped in
        OperationalError 2003.
        """
        #TODO: Set close callback
        #raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
        sock = None
        try:
            if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                addr = self.unix_socket
                self.host_info = "Localhost via UNIX socket: " + self.unix_socket
            else:
                sock = socket.socket()
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                addr = (self.host, self.port)
                self.host_info = "socket %s:%d" % (self.host, self.port)
            stream = iostream.IOStream(sock)
            # TODO: handle connect_timeout
            yield stream.connect(addr)
            self._stream = stream
            if self.no_delay:
                stream.set_nodelay(True)
            yield self._get_server_information()
            yield self._request_authentication()
            self.connected_time = self.io_loop.time()
            if self.sql_mode is not None:
                yield self.query("SET sql_mode=%s" % (self.sql_mode,))
            if self.init_command is not None:
                yield self.query(self.init_command)
                yield self.commit()
            if self.autocommit_mode is not None:
                yield self.autocommit(self.autocommit_mode)
        except BaseException as e:
            # Clean up the half-open socket before reporting the failure.
            if sock is not None:
                try:
                    sock.close()
                except socket.error:
                    pass
            self._stream = None
            if isinstance(e, err.MySQLError):
                raise
            raise OperationalError(
                2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
@gen.coroutine
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
try:
while True:
packet_header = yield self._stream.read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
#TODO: check sequence id
recv_data = yield self._stream.read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
if bytes_to_read < MAX_PACKET_LEN:
break
except iostream.StreamClosedError as e:
raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
packet = packet_type(buff, self.encoding)
packet.check_error()
raise gen.Return(packet)
def _write_bytes(self, data):
return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
@gen.coroutine
def _execute_command(self, command, sql):
if not self._stream:
raise InterfaceError("(0, 'Not connected')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None and self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
yield self._result._finish_unbuffered_query()
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
prelude = struct.pack('<iB', chunk_size, command)
self._write_bytes(prelude + sql[:chunk_size-1])
if DEBUG: dump_packet(prelude + sql)
if chunk_size < MAX_PACKET_LEN:
return
seq_id = 1
sql = sql[chunk_size-1:]
while True:
chunk_size = min(MAX_PACKET_LEN, len(sql))
prelude = struct.pack('<i', chunk_size)[:3]
data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
self._write_bytes(data)
if DEBUG: dump_packet(data)
sql = sql[chunk_size:]
if not sql and chunk_size < MAX_PACKET_LEN:
break
seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
@gen.coroutine
def _get_server_information(self):
i = 0
packet = yield self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
# TODO: AUTH PLUGIN NAME may appeare here.
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/connections.py | Connection.select_db | python | def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet() | Set current db | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/connections.py#L668-L671 | null | class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
@gen.coroutine
def close_async(self):
"""Send the quit message and close the socket"""
if self._stream is None or self._stream.closed():
self._stream = None
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
yield self._stream.write(send_data)
self.close()
@property
def open(self):
return self._stream is not None
def __del__(self):
self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
@gen.coroutine
def begin(self):
"""Begin transaction."""
yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
yield self._read_ok_packet()
@gen.coroutine
def commit(self):
''' Commit changes to stable storage '''
yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
yield self._read_ok_packet()
@gen.coroutine
def rollback(self):
''' Roll back the current transaction '''
yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
yield self._read_ok_packet()
@gen.coroutine
def show_warnings(self):
"""SHOW WARNINGS"""
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows)
@gen.coroutine
def escape(self, obj):
''' Escape whatever value you pass to it '''
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset)
def literal(self, obj):
'''Alias for escape()'''
return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
@gen.coroutine
def query(self, sql, unbuffered=False):
if DEBUG:
print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
sql = sql.encode(self.encoding)
yield self._execute_command(COMMAND.COM_QUERY, sql)
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
@gen.coroutine
def next_result(self, unbuffered=False):
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
def affected_rows(self):
return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
@gen.coroutine
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self._stream is None:
if reconnect:
yield self.connect()
reconnect = False
else:
raise Error("Already closed")
try:
yield self._execute_command(COMMAND.COM_PING, "")
yield self._read_ok_packet()
except Exception:
if reconnect:
yield self.connect()
yield self.ping(False)
else:
raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
@gen.coroutine
def connect(self):
#TODO: Set close callback
#raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
sock = None
try:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self.unix_socket
self.host_info = "Localhost via UNIX socket: " + self.unix_socket
else:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
addr = (self.host, self.port)
self.host_info = "socket %s:%d" % (self.host, self.port)
stream = iostream.IOStream(sock)
# TODO: handle connect_timeout
yield stream.connect(addr)
self._stream = stream
if self.no_delay:
stream.set_nodelay(True)
yield self._get_server_information()
yield self._request_authentication()
self.connected_time = self.io_loop.time()
if self.sql_mode is not None:
yield self.query("SET sql_mode=%s" % (self.sql_mode,))
if self.init_command is not None:
yield self.query(self.init_command)
yield self.commit()
if self.autocommit_mode is not None:
yield self.autocommit(self.autocommit_mode)
except BaseException as e:
if sock is not None:
try:
sock.close()
except socket.error:
pass
self._stream = None
if isinstance(e, err.MySQLError):
raise
raise OperationalError(
2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
@gen.coroutine
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
try:
while True:
packet_header = yield self._stream.read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
#TODO: check sequence id
recv_data = yield self._stream.read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
if bytes_to_read < MAX_PACKET_LEN:
break
except iostream.StreamClosedError as e:
raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
packet = packet_type(buff, self.encoding)
packet.check_error()
raise gen.Return(packet)
def _write_bytes(self, data):
return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
@gen.coroutine
def _execute_command(self, command, sql):
if not self._stream:
raise InterfaceError("(0, 'Not connected')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None and self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
yield self._result._finish_unbuffered_query()
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
prelude = struct.pack('<iB', chunk_size, command)
self._write_bytes(prelude + sql[:chunk_size-1])
if DEBUG: dump_packet(prelude + sql)
if chunk_size < MAX_PACKET_LEN:
return
seq_id = 1
sql = sql[chunk_size-1:]
while True:
chunk_size = min(MAX_PACKET_LEN, len(sql))
prelude = struct.pack('<i', chunk_size)[:3]
data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
self._write_bytes(data)
if DEBUG: dump_packet(data)
sql = sql[chunk_size:]
if not sql and chunk_size < MAX_PACKET_LEN:
break
seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
@gen.coroutine
def _get_server_information(self):
i = 0
packet = yield self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
# TODO: AUTH PLUGIN NAME may appeare here.
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/connections.py | Connection.escape | python | def escape(self, obj):
''' Escape whatever value you pass to it '''
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset) | Escape whatever value you pass to it | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/connections.py#L673-L677 | [
"def escape_item(val, charset):\n if type(val) in [tuple, list, set]:\n return escape_sequence(val, charset)\n if type(val) is dict:\n return escape_dict(val, charset)\n encoder = encoders[type(val)]\n val = encoder(val)\n return val\n",
"def escape_string(self, s):\n if (self.serv... | class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
@gen.coroutine
def close_async(self):
"""Send the quit message and close the socket"""
if self._stream is None or self._stream.closed():
self._stream = None
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
yield self._stream.write(send_data)
self.close()
@property
def open(self):
return self._stream is not None
def __del__(self):
self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
@gen.coroutine
def begin(self):
"""Begin transaction."""
yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
yield self._read_ok_packet()
@gen.coroutine
def commit(self):
''' Commit changes to stable storage '''
yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
yield self._read_ok_packet()
@gen.coroutine
def rollback(self):
''' Roll back the current transaction '''
yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
yield self._read_ok_packet()
@gen.coroutine
def show_warnings(self):
"""SHOW WARNINGS"""
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows)
@gen.coroutine
def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet()
def literal(self, obj):
'''Alias for escape()'''
return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
@gen.coroutine
def query(self, sql, unbuffered=False):
if DEBUG:
print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
sql = sql.encode(self.encoding)
yield self._execute_command(COMMAND.COM_QUERY, sql)
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
@gen.coroutine
def next_result(self, unbuffered=False):
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
def affected_rows(self):
return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
@gen.coroutine
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self._stream is None:
if reconnect:
yield self.connect()
reconnect = False
else:
raise Error("Already closed")
try:
yield self._execute_command(COMMAND.COM_PING, "")
yield self._read_ok_packet()
except Exception:
if reconnect:
yield self.connect()
yield self.ping(False)
else:
raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
@gen.coroutine
def connect(self):
#TODO: Set close callback
#raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
sock = None
try:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self.unix_socket
self.host_info = "Localhost via UNIX socket: " + self.unix_socket
else:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
addr = (self.host, self.port)
self.host_info = "socket %s:%d" % (self.host, self.port)
stream = iostream.IOStream(sock)
# TODO: handle connect_timeout
yield stream.connect(addr)
self._stream = stream
if self.no_delay:
stream.set_nodelay(True)
yield self._get_server_information()
yield self._request_authentication()
self.connected_time = self.io_loop.time()
if self.sql_mode is not None:
yield self.query("SET sql_mode=%s" % (self.sql_mode,))
if self.init_command is not None:
yield self.query(self.init_command)
yield self.commit()
if self.autocommit_mode is not None:
yield self.autocommit(self.autocommit_mode)
except BaseException as e:
if sock is not None:
try:
sock.close()
except socket.error:
pass
self._stream = None
if isinstance(e, err.MySQLError):
raise
raise OperationalError(
2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
@gen.coroutine
def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
try:
while True:
packet_header = yield self._stream.read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
#TODO: check sequence id
recv_data = yield self._stream.read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
if bytes_to_read < MAX_PACKET_LEN:
break
except iostream.StreamClosedError as e:
raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
packet = packet_type(buff, self.encoding)
packet.check_error()
raise gen.Return(packet)
def _write_bytes(self, data):
return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
@gen.coroutine
def _execute_command(self, command, sql):
if not self._stream:
raise InterfaceError("(0, 'Not connected')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None and self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
yield self._result._finish_unbuffered_query()
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
prelude = struct.pack('<iB', chunk_size, command)
self._write_bytes(prelude + sql[:chunk_size-1])
if DEBUG: dump_packet(prelude + sql)
if chunk_size < MAX_PACKET_LEN:
return
seq_id = 1
sql = sql[chunk_size-1:]
while True:
chunk_size = min(MAX_PACKET_LEN, len(sql))
prelude = struct.pack('<i', chunk_size)[:3]
data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
self._write_bytes(data)
if DEBUG: dump_packet(data)
sql = sql[chunk_size:]
if not sql and chunk_size < MAX_PACKET_LEN:
break
seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
@gen.coroutine
def _get_server_information(self):
i = 0
packet = yield self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
# TODO: AUTH PLUGIN NAME may appeare here.
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/connections.py | Connection._read_packet | python | def _read_packet(self, packet_type=MysqlPacket):
buff = b''
try:
while True:
packet_header = yield self._stream.read_bytes(4)
if DEBUG: dump_packet(packet_header)
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
#TODO: check sequence id
recv_data = yield self._stream.read_bytes(bytes_to_read)
if DEBUG: dump_packet(recv_data)
buff += recv_data
if bytes_to_read < MAX_PACKET_LEN:
break
except iostream.StreamClosedError as e:
raise OperationalError(2006, "MySQL server has gone away (%s)" % (e,))
packet = packet_type(buff, self.encoding)
packet.check_error()
raise gen.Return(packet) | Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results. | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/connections.py#L798-L820 | null | class Connection(object):
"""
Representation of a socket with a mysql server.
The proper way to get an instance of this class is to call
connect().
"""
#: :type: tornado.iostream.IOStream
_stream = None
def __init__(self, host="localhost", user=None, password="",
database=None, port=3306, unix_socket=None,
charset='', sql_mode=None,
read_default_file=None, conv=decoders, use_unicode=None,
client_flag=0, cursorclass=Cursor, init_command=None,
connect_timeout=None, ssl=None, read_default_group=None,
compress=None, named_pipe=None, no_delay=False,
autocommit=False, db=None, passwd=None, io_loop=None):
"""
Establish a connection to the MySQL database. Accepts several
arguments:
host: Host where the database server is located
user: Username to log in as
password: Password to use.
database: Database to use, None to not use a particular one.
port: MySQL port to use, default is usually OK.
unix_socket: Optionally, you can use a unix socket rather than TCP/IP.
charset: Charset you want to use.
sql_mode: Default SQL_MODE to use.
read_default_file:
Specifies my.cnf file to read these parameters from under the [client] section.
conv:
Decoders dictionary to use instead of the default one.
This is used to provide custom marshalling of types. See converters.
use_unicode:
Whether or not to default to unicode strings.
This option defaults to true for Py3k.
client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT.
cursorclass: Custom cursor class to use.
init_command: Initial SQL statement to run when connection is established.
connect_timeout: Timeout before throwing an exception when connecting.
ssl:
A dict of arguments similar to mysql_ssl_set()'s parameters.
For now the capath and cipher arguments are not supported.
read_default_group: Group to read from in the configuration file.
compress; Not supported
named_pipe: Not supported
no_delay: Disable Nagle's algorithm on the socket
autocommit: Autocommit mode. None means use server default. (default: False)
io_loop: Tornado IOLoop
db: Alias for database. (for compatibility to MySQLdb)
passwd: Alias for password. (for compatibility to MySQLdb)
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
if use_unicode is None and sys.version_info[0] > 2:
use_unicode = True
if db is not None and database is None:
database = db
if passwd is not None and not password:
password = passwd
if compress or named_pipe:
raise NotImplementedError("compress and named_pipe arguments are not supported")
if ssl and ('capath' in ssl or 'cipher' in ssl):
raise NotImplementedError('ssl options capath and cipher are not supported')
self.ssl = False
if ssl:
if not SSL_ENABLED:
raise NotImplementedError("ssl module not found")
self.ssl = True
client_flag |= CLIENT.SSL
for k in ('key', 'cert', 'ca'):
v = None
if k in ssl:
v = ssl[k]
setattr(self, k, v)
if read_default_group and not read_default_file:
if sys.platform.startswith("win"):
read_default_file = "c:\\my.ini"
else:
read_default_file = "/etc/my.cnf"
if read_default_file:
if not read_default_group:
read_default_group = "client"
cfg = configparser.RawConfigParser()
cfg.read(os.path.expanduser(read_default_file))
def _config(key, default):
try:
return cfg.get(read_default_group, key)
except Exception:
return default
user = _config("user", user)
password = _config("password", password)
host = _config("host", host)
database = _config("database", database)
unix_socket = _config("socket", unix_socket)
port = int(_config("port", port))
charset = _config("default-character-set", charset)
self.host = host
self.port = port
self.user = user or DEFAULT_USER
self.password = password or ""
self.db = database
self.no_delay = no_delay
self.unix_socket = unix_socket
if charset:
self.charset = charset
self.use_unicode = True
else:
self.charset = DEFAULT_CHARSET
self.use_unicode = False
if use_unicode is not None:
self.use_unicode = use_unicode
self.encoding = charset_by_name(self.charset).encoding
client_flag |= CLIENT.CAPABILITIES | CLIENT.MULTI_STATEMENTS
if self.db:
client_flag |= CLIENT.CONNECT_WITH_DB
self.client_flag = client_flag
self.cursorclass = cursorclass
self.connect_timeout = connect_timeout
self._result = None
self._affected_rows = 0
self.host_info = "Not connected"
#: specified autocommit mode. None means use server default.
self.autocommit_mode = autocommit
self.encoders = encoders # Need for MySQLdb compatibility.
self.decoders = conv
self.sql_mode = sql_mode
self.init_command = init_command
def close(self):
"""Close the socket without sending quit message."""
stream = self._stream
if stream is None:
return
self._stream = None
stream.close()
@gen.coroutine
def close_async(self):
"""Send the quit message and close the socket"""
if self._stream is None or self._stream.closed():
self._stream = None
return
send_data = struct.pack('<i', 1) + int2byte(COMMAND.COM_QUIT)
yield self._stream.write(send_data)
self.close()
@property
def open(self):
return self._stream is not None
def __del__(self):
self.close()
@gen.coroutine
def autocommit(self, value):
self.autocommit_mode = bool(value)
current = self.get_autocommit()
if value != current:
yield self._send_autocommit_mode()
def get_autocommit(self):
return bool(self.server_status &
SERVER_STATUS.SERVER_STATUS_AUTOCOMMIT)
@gen.coroutine
def _read_ok_packet(self):
pkt = yield self._read_packet()
if not pkt.is_ok_packet():
raise OperationalError(2014, "Command Out of Sync")
ok = OKPacketWrapper(pkt)
self.server_status = ok.server_status
raise gen.Return(ok)
@gen.coroutine
def _send_autocommit_mode(self):
''' Set whether or not to commit after every execute() '''
yield self._execute_command(
COMMAND.COM_QUERY,
"SET AUTOCOMMIT = %s" % self.escape(self.autocommit_mode))
yield self._read_ok_packet()
@gen.coroutine
def begin(self):
"""Begin transaction."""
yield self._execute_command(COMMAND.COM_QUERY, "BEGIN")
yield self._read_ok_packet()
@gen.coroutine
def commit(self):
''' Commit changes to stable storage '''
yield self._execute_command(COMMAND.COM_QUERY, "COMMIT")
yield self._read_ok_packet()
@gen.coroutine
def rollback(self):
''' Roll back the current transaction '''
yield self._execute_command(COMMAND.COM_QUERY, "ROLLBACK")
yield self._read_ok_packet()
@gen.coroutine
def show_warnings(self):
"""SHOW WARNINGS"""
yield self._execute_command(COMMAND.COM_QUERY, "SHOW WARNINGS")
result = MySQLResult(self)
yield result.read()
raise gen.Return(result.rows)
@gen.coroutine
def select_db(self, db):
'''Set current db'''
yield self._execute_command(COMMAND.COM_INIT_DB, db)
yield self._read_ok_packet()
def escape(self, obj):
''' Escape whatever value you pass to it '''
if isinstance(obj, str_type):
return "'" + self.escape_string(obj) + "'"
return escape_item(obj, self.charset)
def literal(self, obj):
'''Alias for escape()'''
return self.escape(obj)
def escape_string(self, s):
if (self.server_status &
SERVER_STATUS.SERVER_STATUS_NO_BACKSLASH_ESCAPES):
return s.replace("'", "''")
return escape_string(s)
def cursor(self, cursor=None):
''' Create a new cursor to execute queries with '''
if cursor:
return cursor(self)
return self.cursorclass(self)
# The following methods are INTERNAL USE ONLY (called from Cursor)
@gen.coroutine
def query(self, sql, unbuffered=False):
if DEBUG:
print("DEBUG: sending query:", sql)
if isinstance(sql, text_type) and not (JYTHON or IRONPYTHON):
sql = sql.encode(self.encoding)
yield self._execute_command(COMMAND.COM_QUERY, sql)
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
@gen.coroutine
def next_result(self, unbuffered=False):
yield self._read_query_result(unbuffered=unbuffered)
raise gen.Return(self._affected_rows)
def affected_rows(self):
return self._affected_rows
@gen.coroutine
def kill(self, thread_id):
arg = struct.pack('<I', thread_id)
yield self._execute_command(COMMAND.COM_PROCESS_KILL, arg)
yield self._read_ok_packet()
@gen.coroutine
def ping(self, reconnect=True):
"""Check if the server is alive"""
if self._stream is None:
if reconnect:
yield self.connect()
reconnect = False
else:
raise Error("Already closed")
try:
yield self._execute_command(COMMAND.COM_PING, "")
yield self._read_ok_packet()
except Exception:
if reconnect:
yield self.connect()
yield self.ping(False)
else:
raise
@gen.coroutine
def set_charset(self, charset):
# Make sure charset is supported.
encoding = charset_by_name(charset).encoding
yield self._execute_command(COMMAND.COM_QUERY, "SET NAMES %s" % self.escape(charset))
yield self._read_packet()
self.charset = charset
self.encoding = encoding
@gen.coroutine
def connect(self):
#TODO: Set close callback
#raise OperationalError(2006, "MySQL server has gone away (%r)" % (e,))
sock = None
try:
if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
addr = self.unix_socket
self.host_info = "Localhost via UNIX socket: " + self.unix_socket
else:
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
addr = (self.host, self.port)
self.host_info = "socket %s:%d" % (self.host, self.port)
stream = iostream.IOStream(sock)
# TODO: handle connect_timeout
yield stream.connect(addr)
self._stream = stream
if self.no_delay:
stream.set_nodelay(True)
yield self._get_server_information()
yield self._request_authentication()
self.connected_time = self.io_loop.time()
if self.sql_mode is not None:
yield self.query("SET sql_mode=%s" % (self.sql_mode,))
if self.init_command is not None:
yield self.query(self.init_command)
yield self.commit()
if self.autocommit_mode is not None:
yield self.autocommit(self.autocommit_mode)
except BaseException as e:
if sock is not None:
try:
sock.close()
except socket.error:
pass
self._stream = None
if isinstance(e, err.MySQLError):
raise
raise OperationalError(
2003, "Can't connect to MySQL server on %r (%s)" % (self.host, e))
@gen.coroutine
def _write_bytes(self, data):
return self._stream.write(data)
@gen.coroutine
def _read_query_result(self, unbuffered=False):
if unbuffered:
try:
result = MySQLResult(self)
yield result.init_unbuffered_query()
except:
result.unbuffered_active = False
result.connection = None
raise
else:
result = MySQLResult(self)
yield result.read()
self._result = result
self._affected_rows = result.affected_rows
if result.server_status is not None:
self.server_status = result.server_status
def insert_id(self):
if self._result:
return self._result.insert_id
else:
return 0
@gen.coroutine
def _execute_command(self, command, sql):
if not self._stream:
raise InterfaceError("(0, 'Not connected')")
# If the last query was unbuffered, make sure it finishes before
# sending new commands
if self._result is not None and self._result.unbuffered_active:
warnings.warn("Previous unbuffered result was left incomplete")
yield self._result._finish_unbuffered_query()
if isinstance(sql, text_type):
sql = sql.encode(self.encoding)
chunk_size = min(MAX_PACKET_LEN, len(sql) + 1) # +1 is for command
prelude = struct.pack('<iB', chunk_size, command)
self._write_bytes(prelude + sql[:chunk_size-1])
if DEBUG: dump_packet(prelude + sql)
if chunk_size < MAX_PACKET_LEN:
return
seq_id = 1
sql = sql[chunk_size-1:]
while True:
chunk_size = min(MAX_PACKET_LEN, len(sql))
prelude = struct.pack('<i', chunk_size)[:3]
data = prelude + int2byte(seq_id%256) + sql[:chunk_size]
self._write_bytes(data)
if DEBUG: dump_packet(data)
sql = sql[chunk_size:]
if not sql and chunk_size < MAX_PACKET_LEN:
break
seq_id += 1
@gen.coroutine
def _request_authentication(self):
self.client_flag |= CLIENT.CAPABILITIES
if self.server_version.startswith('5'):
self.client_flag |= CLIENT.MULTI_RESULTS
if self.user is None:
raise ValueError("Did not specify a username")
charset_id = charset_by_name(self.charset).id
if isinstance(self.user, text_type):
self.user = self.user.encode(self.encoding)
data_init = struct.pack('<iIB23s', self.client_flag, 1, charset_id, b'')
next_packet = 1
if self.ssl:
data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init
next_packet += 1
if DEBUG: dump_packet(data)
yield self._write_bytes(data)
yield self._stream.start_tls(
False,
{'keyfile': self.key,
'certfile': self.cert,
'ssl_version': ssl.PROTOCOL_TLSv1,
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': self.ca})
data = data_init + self.user + b'\0' + \
_scramble(self.password.encode('latin1'), self.salt)
if self.db:
if isinstance(self.db, text_type):
self.db = self.db.encode(self.encoding)
data += self.db + int2byte(0)
data = pack_int24(len(data)) + int2byte(next_packet) + data
next_packet += 2
if DEBUG: dump_packet(data)
self._write_bytes(data)
auth_packet = yield self._read_packet()
# if old_passwords is enabled the packet will be 1 byte long and
# have the octet 254
if auth_packet.is_eof_packet():
# send legacy handshake
data = _scramble_323(self.password.encode('latin1'), self.salt) + b'\0'
data = pack_int24(len(data)) + int2byte(next_packet) + data
self._write_bytes(data)
auth_packet = self._read_packet()
# _mysql support
def thread_id(self):
return self.server_thread_id[0]
def character_set_name(self):
return self.charset
def get_host_info(self):
return self.host_info
def get_proto_info(self):
return self.protocol_version
@gen.coroutine
def _get_server_information(self):
i = 0
packet = yield self._read_packet()
data = packet.get_all_data()
if DEBUG: dump_packet(data)
self.protocol_version = byte2int(data[i:i+1])
i += 1
server_end = data.find(int2byte(0), i)
self.server_version = data[i:server_end].decode('latin1')
i = server_end + 1
self.server_thread_id = struct.unpack('<I', data[i:i+4])
i += 4
self.salt = data[i:i+8]
i += 9 # 8 + 1(filler)
self.server_capabilities = struct.unpack('<H', data[i:i+2])[0]
i += 2
if len(data) >= i + 6:
lang, stat, cap_h, salt_len = struct.unpack('<BHHB', data[i:i+6])
i += 6
self.server_language = lang
self.server_charset = charset_by_id(lang).name
self.server_status = stat
if DEBUG: print("server_status: %x" % stat)
self.server_capabilities |= cap_h << 16
if DEBUG: print("salt_len:", salt_len)
salt_len = max(12, salt_len - 9)
# reserved
i += 10
if len(data) >= i + salt_len:
# salt_len includes auth_plugin_data_part_1 and filler
self.salt += data[i:i+salt_len]
# TODO: AUTH PLUGIN NAME may appeare here.
def get_server_info(self):
return self.server_version
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
|
PyMySQL/Tornado-MySQL | tornado_mysql/pools.py | Pool.stat | python | def stat(self):
return (self._opened_conns, len(self._free_conn), len(self._waitings)) | Returns (opened connections, free connections, waiters) | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/pools.py#L53-L55 | null | class Pool(object):
"""Connection pool like Golang's database/sql.DB.
This connection pool is based on autocommit mode.
You can execute query without knowing connection.
When transaction is necessary, you can checkout transaction object.
"""
def __init__(self,
connect_kwargs,
max_idle_connections=1,
max_recycle_sec=3600,
max_open_connections=0,
io_loop=None,
):
"""
:param dict connect_kwargs: kwargs for tornado_mysql.connect()
:param int max_idle_connections: Max number of keeping connections.
:param int max_recycle_sec: How long connections are recycled.
:param int max_open_connections:
Max number of opened connections. 0 means no limit.
"""
connect_kwargs['autocommit'] = True
self.io_loop = io_loop or IOLoop.current()
self.connect_kwargs = connect_kwargs
self.max_idle = max_idle_connections
self.max_open = max_open_connections
self.max_recycle_sec = max_recycle_sec
self._opened_conns = 0
self._free_conn = deque()
self._waitings = deque()
def _get_conn(self): # -> Future[connection]
now = self.io_loop.time()
# Try to reuse in free pool
while self._free_conn:
conn = self._free_conn.popleft()
if now - conn.connected_time > self.max_recycle_sec:
self._close_async(conn)
continue
log.debug("Reusing connection from pool: %s", self.stat())
fut = Future()
fut.set_result(conn)
return fut
# Open new connection
if self.max_open == 0 or self._opened_conns < self.max_open:
self._opened_conns += 1
log.debug("Creating new connection: %s", self.stat())
fut = connect(**self.connect_kwargs)
fut.add_done_callback(self._on_connect) # self._opened_conns -=1 on exception
return fut
# Wait to other connection is released.
fut = Future()
self._waitings.append(fut)
return fut
def _on_connect(self, fut):
if fut.exception():
self._opened_conns -= 1
def _put_conn(self, conn):
if (len(self._free_conn) < self.max_idle and
self.io_loop.time() - conn.connected_time < self.max_recycle_sec):
if self._waitings:
fut = self._waitings.popleft()
fut.set_result(conn)
log.debug("Passing returned connection to waiter: %s", self.stat())
else:
self._free_conn.append(conn)
log.debug("Add conn to free pool: %s", self.stat())
else:
self._close_async(conn)
def _close_async(self, conn):
self.io_loop.add_future(conn.close_async(), callback=self._after_close)
def _close_conn(self, conn):
conn.close()
self._after_close()
def _after_close(self, fut=None):
if self._waitings:
fut = self._waitings.popleft()
conn = Connection(**self.connect_kwargs)
cf = conn.connect()
self.io_loop.add_future(cf, callback=lambda f: fut.set_result(conn))
else:
self._opened_conns -= 1
log.debug("Connection closed: %s", self.stat())
@coroutine
def execute(self, query, params=None, cursor=None):
"""Execute query in pool.
Returns future yielding closed cursor.
You can get rows, lastrowid, etc from the cursor.
:param cursor: cursor class(Cursor, DictCursor. etc.)
:return: Future of cursor
:rtype: Future
"""
conn = yield self._get_conn()
try:
cur = conn.cursor(cursor)
yield cur.execute(query, params)
yield cur.close()
except:
self._close_conn(conn)
raise
else:
self._put_conn(conn)
raise Return(cur)
@coroutine
def begin(self):
"""Start transaction
Wait to get connection and returns `Transaction` object.
:return: Future[Transaction]
:rtype: Future
"""
conn = yield self._get_conn()
try:
yield conn.begin()
except:
self._close_conn(conn)
raise
trx = Transaction(self, conn)
raise Return(trx)
|
PyMySQL/Tornado-MySQL | tornado_mysql/pools.py | Pool.execute | python | def execute(self, query, params=None, cursor=None):
conn = yield self._get_conn()
try:
cur = conn.cursor(cursor)
yield cur.execute(query, params)
yield cur.close()
except:
self._close_conn(conn)
raise
else:
self._put_conn(conn)
raise Return(cur) | Execute query in pool.
Returns future yielding closed cursor.
You can get rows, lastrowid, etc from the cursor.
:param cursor: cursor class(Cursor, DictCursor. etc.)
:return: Future of cursor
:rtype: Future | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/pools.py#L119-L139 | [
"def _get_conn(self): # -> Future[connection]\n now = self.io_loop.time()\n\n # Try to reuse in free pool\n while self._free_conn:\n conn = self._free_conn.popleft()\n if now - conn.connected_time > self.max_recycle_sec:\n self._close_async(conn)\n continue\n log... | class Pool(object):
"""Connection pool like Golang's database/sql.DB.
This connection pool is based on autocommit mode.
You can execute query without knowing connection.
When transaction is necessary, you can checkout transaction object.
"""
def __init__(self,
connect_kwargs,
max_idle_connections=1,
max_recycle_sec=3600,
max_open_connections=0,
io_loop=None,
):
"""
:param dict connect_kwargs: kwargs for tornado_mysql.connect()
:param int max_idle_connections: Max number of keeping connections.
:param int max_recycle_sec: How long connections are recycled.
:param int max_open_connections:
Max number of opened connections. 0 means no limit.
"""
connect_kwargs['autocommit'] = True
self.io_loop = io_loop or IOLoop.current()
self.connect_kwargs = connect_kwargs
self.max_idle = max_idle_connections
self.max_open = max_open_connections
self.max_recycle_sec = max_recycle_sec
self._opened_conns = 0
self._free_conn = deque()
self._waitings = deque()
def stat(self):
"""Returns (opened connections, free connections, waiters)"""
return (self._opened_conns, len(self._free_conn), len(self._waitings))
def _get_conn(self): # -> Future[connection]
now = self.io_loop.time()
# Try to reuse in free pool
while self._free_conn:
conn = self._free_conn.popleft()
if now - conn.connected_time > self.max_recycle_sec:
self._close_async(conn)
continue
log.debug("Reusing connection from pool: %s", self.stat())
fut = Future()
fut.set_result(conn)
return fut
# Open new connection
if self.max_open == 0 or self._opened_conns < self.max_open:
self._opened_conns += 1
log.debug("Creating new connection: %s", self.stat())
fut = connect(**self.connect_kwargs)
fut.add_done_callback(self._on_connect) # self._opened_conns -=1 on exception
return fut
# Wait to other connection is released.
fut = Future()
self._waitings.append(fut)
return fut
def _on_connect(self, fut):
if fut.exception():
self._opened_conns -= 1
def _put_conn(self, conn):
if (len(self._free_conn) < self.max_idle and
self.io_loop.time() - conn.connected_time < self.max_recycle_sec):
if self._waitings:
fut = self._waitings.popleft()
fut.set_result(conn)
log.debug("Passing returned connection to waiter: %s", self.stat())
else:
self._free_conn.append(conn)
log.debug("Add conn to free pool: %s", self.stat())
else:
self._close_async(conn)
def _close_async(self, conn):
self.io_loop.add_future(conn.close_async(), callback=self._after_close)
def _close_conn(self, conn):
conn.close()
self._after_close()
def _after_close(self, fut=None):
if self._waitings:
fut = self._waitings.popleft()
conn = Connection(**self.connect_kwargs)
cf = conn.connect()
self.io_loop.add_future(cf, callback=lambda f: fut.set_result(conn))
else:
self._opened_conns -= 1
log.debug("Connection closed: %s", self.stat())
@coroutine
@coroutine
def begin(self):
"""Start transaction
Wait to get connection and returns `Transaction` object.
:return: Future[Transaction]
:rtype: Future
"""
conn = yield self._get_conn()
try:
yield conn.begin()
except:
self._close_conn(conn)
raise
trx = Transaction(self, conn)
raise Return(trx)
|
PyMySQL/Tornado-MySQL | tornado_mysql/pools.py | Pool.begin | python | def begin(self):
conn = yield self._get_conn()
try:
yield conn.begin()
except:
self._close_conn(conn)
raise
trx = Transaction(self, conn)
raise Return(trx) | Start transaction
Wait to get connection and returns `Transaction` object.
:return: Future[Transaction]
:rtype: Future | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/pools.py#L142-L157 | [
"def _get_conn(self): # -> Future[connection]\n now = self.io_loop.time()\n\n # Try to reuse in free pool\n while self._free_conn:\n conn = self._free_conn.popleft()\n if now - conn.connected_time > self.max_recycle_sec:\n self._close_async(conn)\n continue\n log... | class Pool(object):
"""Connection pool like Golang's database/sql.DB.
This connection pool is based on autocommit mode.
You can execute query without knowing connection.
When transaction is necessary, you can checkout transaction object.
"""
def __init__(self,
connect_kwargs,
max_idle_connections=1,
max_recycle_sec=3600,
max_open_connections=0,
io_loop=None,
):
"""
:param dict connect_kwargs: kwargs for tornado_mysql.connect()
:param int max_idle_connections: Max number of keeping connections.
:param int max_recycle_sec: How long connections are recycled.
:param int max_open_connections:
Max number of opened connections. 0 means no limit.
"""
connect_kwargs['autocommit'] = True
self.io_loop = io_loop or IOLoop.current()
self.connect_kwargs = connect_kwargs
self.max_idle = max_idle_connections
self.max_open = max_open_connections
self.max_recycle_sec = max_recycle_sec
self._opened_conns = 0
self._free_conn = deque()
self._waitings = deque()
def stat(self):
"""Returns (opened connections, free connections, waiters)"""
return (self._opened_conns, len(self._free_conn), len(self._waitings))
def _get_conn(self): # -> Future[connection]
now = self.io_loop.time()
# Try to reuse in free pool
while self._free_conn:
conn = self._free_conn.popleft()
if now - conn.connected_time > self.max_recycle_sec:
self._close_async(conn)
continue
log.debug("Reusing connection from pool: %s", self.stat())
fut = Future()
fut.set_result(conn)
return fut
# Open new connection
if self.max_open == 0 or self._opened_conns < self.max_open:
self._opened_conns += 1
log.debug("Creating new connection: %s", self.stat())
fut = connect(**self.connect_kwargs)
fut.add_done_callback(self._on_connect) # self._opened_conns -=1 on exception
return fut
# Wait to other connection is released.
fut = Future()
self._waitings.append(fut)
return fut
def _on_connect(self, fut):
if fut.exception():
self._opened_conns -= 1
def _put_conn(self, conn):
if (len(self._free_conn) < self.max_idle and
self.io_loop.time() - conn.connected_time < self.max_recycle_sec):
if self._waitings:
fut = self._waitings.popleft()
fut.set_result(conn)
log.debug("Passing returned connection to waiter: %s", self.stat())
else:
self._free_conn.append(conn)
log.debug("Add conn to free pool: %s", self.stat())
else:
self._close_async(conn)
def _close_async(self, conn):
self.io_loop.add_future(conn.close_async(), callback=self._after_close)
def _close_conn(self, conn):
conn.close()
self._after_close()
def _after_close(self, fut=None):
if self._waitings:
fut = self._waitings.popleft()
conn = Connection(**self.connect_kwargs)
cf = conn.connect()
self.io_loop.add_future(cf, callback=lambda f: fut.set_result(conn))
else:
self._opened_conns -= 1
log.debug("Connection closed: %s", self.stat())
@coroutine
def execute(self, query, params=None, cursor=None):
"""Execute query in pool.
Returns future yielding closed cursor.
You can get rows, lastrowid, etc from the cursor.
:param cursor: cursor class(Cursor, DictCursor. etc.)
:return: Future of cursor
:rtype: Future
"""
conn = yield self._get_conn()
try:
cur = conn.cursor(cursor)
yield cur.execute(query, params)
yield cur.close()
except:
self._close_conn(conn)
raise
else:
self._put_conn(conn)
raise Return(cur)
@coroutine
|
PyMySQL/Tornado-MySQL | tornado_mysql/pools.py | Transaction.execute | python | def execute(self, query, args=None):
self._ensure_conn()
cur = self._conn.cursor()
yield cur.execute(query, args)
raise Return(cur) | :return: Future[Cursor]
:rtype: Future | train | https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/pools.py#L175-L183 | [
"def _ensure_conn(self):\n if self._conn is None:\n raise Exception(\"Transaction is closed already\")\n"
] | class Transaction(object):
"""Represents transaction in pool"""
def __init__(self, pool, conn):
self._pool = pool
self._conn = conn
def _ensure_conn(self):
if self._conn is None:
raise Exception("Transaction is closed already")
def _close(self):
self._pool._put_conn(self._conn)
self._pool = self._conn = None
@coroutine
@coroutine
def commit(self):
self._ensure_conn()
yield self._conn.commit()
self._close()
@coroutine
def rollback(self):
self._ensure_conn()
yield self._conn.rollback()
self._close()
def __del__(self):
if self._pool is not None:
warnings.warn("Transaction has not committed or rollbacked.")
log.warn("Transaction has not committed or rollbacked.")
self._pool._close_conn(self._conn)
|
adamcharnock/swiftwind | swiftwind/housemates/forms.py | HousemateCreateForm.clean_account | python | def clean_account(self):
account = self.cleaned_data['account']
if not account:
return
if account.type != Account.TYPES.income:
raise ValidationError('Account must be an income account')
try:
account.housemate
except Housemate.DoesNotExist:
pass
else:
raise ValidationError('Account already has a housemate')
return account | Ensure this is an income account | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/housemates/forms.py#L28-L44 | null | class HousemateCreateForm(forms.ModelForm):
existing_user = forms.ModelChoiceField(required=False, to_field_name='username',
queryset=get_user_model().objects.filter(housemate=None, is_active=True),
)
new_username = forms.CharField(required=False, max_length=150)
new_email = forms.EmailField(required=False)
new_first_name = forms.CharField(required=False)
new_last_name = forms.CharField(required=False)
account = TreeNodeChoiceField(Account.objects.all(), required=False, to_field_name='uuid',
empty_label='-- Create new account for user --')
class Meta:
model = Housemate
fields = []
# Prevent django from prematurely failing the account for being null
exclude = ['account']
def clean(self):
if self.errors:
return
User = get_user_model()
existing_user_specified = bool(self.cleaned_data.get('existing_user'))
new_user_specified = \
bool(self.cleaned_data.get('new_username')) or \
bool(self.cleaned_data.get('new_email')) or \
bool(self.cleaned_data.get('new_first_name')) or \
bool(self.cleaned_data.get('new_last_name'))
# Ensure the use has done one or the other. Not both. Not neither.
if existing_user_specified and new_user_specified:
raise ValidationError('Either select an existing user or enter details for a new user')
if not existing_user_specified and not new_user_specified:
raise ValidationError('Either select an existing user or enter details for a new user')
# Make sure the required data has been provided to create a new user
if new_user_specified:
username = self.cleaned_data.get('new_username')
email = self.cleaned_data.get('new_email')
first_name = self.cleaned_data.get('new_first_name')
last_name = self.cleaned_data.get('new_last_name')
if not username:
raise ValidationError('Username must be specified to create a new user')
if not email:
raise ValidationError('Email must be specified to create a new user')
if not first_name:
raise ValidationError('First name must be specified to create a new user')
if not last_name:
raise ValidationError('Last name must be specified to create a new user')
if User.objects.filter(username=username).count():
raise ValidationError('Username already in use')
if User.objects.filter(email=email).count():
raise ValidationError('Email already in use')
# Let's be thorough
self.cleaned_data['existing_user'] = None
else:
self.cleaned_data['new_username'] = None
self.cleaned_data['new_email'] = None
self.cleaned_data['new_first_name'] = None
self.cleaned_data['new_last_name'] = None
def save(self, commit=True):
User = get_user_model()
user = self.cleaned_data.get('existing_user')
account = self.cleaned_data.get('account')
# Create a user if we need to
if not user:
user = User.objects.create(
username=self.cleaned_data.get('new_username'),
email=self.cleaned_data.get('new_email'),
first_name=self.cleaned_data.get('new_first_name'),
last_name=self.cleaned_data.get('new_last_name'),
)
# Create an account if we need to
if not account:
# TODO: Generalize, housemate parent account should be configurable
parent = Account.objects.get(name='Housemate Income')
# Figure out the next highest code
# TODO: Move this logic into hordak's AccountManager
codes = Account.objects.filter(parent=parent).values_list('code', flat=True)
codes = list(filter(lambda code: code.isdigit(), codes))
if not codes:
code = '00'
else:
max_code = max(codes)
code_length = len(max_code)
code = str(int(max_code) + 1).zfill(code_length)
account = Account.objects.create(
name=user.get_full_name() or user.username,
parent=parent,
code=code,
currencies=[getattr(settings, 'DEFAULT_CURRENCY', 'EUR')],
)
self.instance.user = user
self.instance.account = account
return super(HousemateCreateForm, self).save(commit)
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_normal | python | def get_amount_normal(self, billing_cycle):
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount | Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L121-L144 | [
"def is_one_off(self):\n return bool(self.total_billing_cycles)\n"
] | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_arrears_balance | python | def get_amount_arrears_balance(self, billing_cycle):
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
) | Get the balance of to_account at the end of billing_cycle | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L146-L150 | null | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
    """Enact this RecurringCost for the given billing cycle.

    This will:
     - Create a RecurredCost and the relevant Transactions & Transaction Legs
     - Mark this RecurringCost as disabled if this is its final billing cycle

    Raises:
        CannotEnactUnenactableRecurringCostError: cost is disabled, archived,
            finished, not yet ready, or fully billed.
        RecurringCostAlreadyEnactedForBillingCycle: a RecurredCost already
            exists for this cost and billing cycle.
    """
    as_of = billing_cycle.date_range.lower
    if not self.is_enactable(as_of):
        raise CannotEnactUnenactableRecurringCostError(
            "RecurringCost {} is unenactable.".format(self.uuid)
        )
    if self.has_enacted(billing_cycle):
        raise RecurringCostAlreadyEnactedForBillingCycle(
            'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
        )
    # Create the RecurredCost and its transactions atomically so a failure
    # in make_transaction() cannot leave a half-enacted cost behind.
    with db_transaction.atomic():
        recurred_cost = RecurredCost(
            recurring_cost=self,
            billing_cycle=billing_cycle,
        )
        recurred_cost.make_transaction()
        recurred_cost.save()
        if disable_if_done:
            # BUG FIX: previously called self.disable_if_done(billing_cycle),
            # passing the BillingCycle as the `commit` flag (signature is
            # disable_if_done(self, commit=True)). That only worked because
            # the object is truthy. Call with the default commit=True.
            self.disable_if_done()
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.get_amount_arrears_transactions | python | def get_amount_arrears_transactions(self, billing_cycle):
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
) | Get the sum of all transaction legs in to_account during given billing cycle | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L152-L160 | null | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
    """Currency of the destination account.

    Simplification: swiftwind assumes each account effectively has a
    single currency (fine for its target audience), so we just take the
    first currency configured on `to_account`.
    """
    account_currencies = self.to_account.currencies
    return account_currencies[0]
def get_amount(self, billing_cycle):
    """Return the amount to bill for `billing_cycle` as a Decimal.

    Dispatches on `self.type` to the matching `get_amount_*` method, then
    normalises any `Balance` result down to a plain `Decimal`.
    """
    amount = {
        RecurringCost.TYPES.normal: self.get_amount_normal,
        RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
        RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
    }[self.type](billing_cycle)
    if isinstance(amount, Balance):
        # Convert balances into decimals. The single-currency assumption
        # means the Balance carries at most one Money entry.
        monies = amount.monies()
        assert len(monies) in (0, 1)
        # A falsy (zero/empty) Balance yields Decimal('0'); otherwise use
        # the lone Money's decimal amount.
        amount = monies[0].amount if amount else Decimal('0')
    return amount
def get_amount_normal(self, billing_cycle):
    """Amount due in `billing_cycle` for a 'normal'-type cost.

    Ordinary recurring costs simply bill `fixed_amount` every cycle.
    One-off costs spread `fixed_amount` evenly over `total_billing_cycles`
    cycles and bill the portion for this cycle (zero once finished).
    """
    if not self.is_one_off():
        # Plain recurring cost: same fixed amount every cycle.
        return self.fixed_amount

    cycle_number = self._get_billing_cycle_number(billing_cycle)
    if cycle_number > self.total_billing_cycles:
        # A future billing cycle after the one-off cost has ended.
        return Decimal('0')

    # Split the amount into equal portions (ratio_split handles the
    # rounding) and return the portion for this 1-indexed cycle.
    portions = ratio_split(
        amount=self.fixed_amount,
        ratios=[Decimal('1')] * self.total_billing_cycles,
    )
    return portions[cycle_number - 1]
def get_amount_arrears_balance(self, billing_cycle):
    """Balance of `to_account` accrued strictly before `billing_cycle` begins."""
    cutoff = billing_cycle.date_range.lower
    return self.to_account.balance(transaction__date__lt=cutoff)
def get_billed_amount(self):
    """Total billed so far: the Balance of all positive legs on our transactions."""
    billed_legs = Leg.objects.filter(
        transaction__recurred_cost__recurring_cost=self,
        amount__gt=0,
    )
    return billed_legs.sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
    """Enact this RecurringCost for the given billing cycle.

    This will:
     - Create a RecurredCost and the relevant Transactions & Transaction Legs
     - Mark this RecurringCost as disabled if this is its final billing cycle

    Raises:
        CannotEnactUnenactableRecurringCostError: cost is disabled, archived,
            finished, not yet ready, or fully billed.
        RecurringCostAlreadyEnactedForBillingCycle: a RecurredCost already
            exists for this cost and billing cycle.
    """
    as_of = billing_cycle.date_range.lower
    if not self.is_enactable(as_of):
        raise CannotEnactUnenactableRecurringCostError(
            "RecurringCost {} is unenactable.".format(self.uuid)
        )
    if self.has_enacted(billing_cycle):
        raise RecurringCostAlreadyEnactedForBillingCycle(
            'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
        )
    # Create the RecurredCost and its transactions atomically so a failure
    # in make_transaction() cannot leave a half-enacted cost behind.
    with db_transaction.atomic():
        recurred_cost = RecurredCost(
            recurring_cost=self,
            billing_cycle=billing_cycle,
        )
        recurred_cost.make_transaction()
        recurred_cost.save()
        if disable_if_done:
            # BUG FIX: previously called self.disable_if_done(billing_cycle),
            # passing the BillingCycle as the `commit` flag (signature is
            # disable_if_done(self, commit=True)). That only worked because
            # the object is truthy. Call with the default commit=True.
            self.disable_if_done()
def disable_if_done(self, commit=True):
    """Set disabled=True once everything has been billed.

    Only ever has an effect on one-off costs, since only those can
    complete billing. Pass ``commit=False`` to defer saving.
    """
    should_disable = self._is_billing_complete() and not self.disabled
    if not should_disable:
        return
    self.disabled = True
    if commit:
        self.save()
def archive(self, commit=True):
    """Set the `archived` flag on this cost.

    Args:
        commit: when True (the default) the model is saved immediately.
    """
    self.archived = True
    if commit:
        self.save()
def unarchive(self, commit=True):
    """Clear the `archived` flag on this cost.

    Args:
        commit: when True (the default) the model is saved immediately.
    """
    self.archived = False
    if commit:
        self.save()
def is_enactable(self, as_of):
    """Can this RecurringCost be enacted as of date `as_of`?

    All of the following must hold: not disabled, not archived, its
    billing cycles have not all completed, it is ready to start, and its
    fixed amount has not been fully billed.
    """
    if self.disabled or self.archived:
        return False
    if self._is_finished(as_of):
        return False
    if not self._is_ready(as_of):
        return False
    return not self._is_billing_complete()
def has_enacted(self, billing_cycle):
    """Has this recurring cost already enacted transactions for given billing cycle?"""
    existing = RecurredCost.objects.filter(
        recurring_cost=self,
        billing_cycle=billing_cycle,
    )
    return existing.exists()
def is_one_off(self):
    """True when a finite number of billing cycles has been configured."""
    # total_billing_cycles is None (or 0) for ordinary recurring costs.
    return self.total_billing_cycles is not None and self.total_billing_cycles != 0
def _is_ready(self, as_of):
    """Is the RecurringCost ready to be enacted as of the date `as_of`?

    One-off costs only become ready once their initial billing cycle has
    started; ordinary recurring costs are always ready.

    Args:
        as_of (Date):
    """
    if not self.is_one_off():
        return True
    return self.initial_billing_cycle.date_range.lower <= as_of
def _is_finished(self, as_of):
    """Have all the requested billing cycles completed by `as_of`?

    Always False for ordinary (non one-off) recurring costs, which never
    finish. If finished, the cost should no longer be enacted.
    """
    if not self.is_one_off():
        return False
    final_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
    return final_cycle.date_range.upper <= as_of
def _is_billing_complete(self):
    """Has the full `fixed_amount` been billed?

    Only meaningful for one-off costs; always False otherwise. If
    complete, the cost should no longer be enacted.
    """
    if not self.is_one_off():
        return False
    target = Balance(self.fixed_amount, self.currency)
    return self.get_billed_amount() >= target
def _get_billing_cycle_number(self, billing_cycle):
    """Gets the 1-indexed number of the billing cycle relative to the provided billing cycle

    Counts the BillingCycles from `initial_billing_cycle` (inclusive) up
    to `billing_cycle` (inclusive).

    Raises:
        ProvidedBillingCycleBeginsBeforeInitialBillingCycle: if
            `billing_cycle` starts before `initial_billing_cycle`.
    """
    begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
    if begins_before_initial_date:
        raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
            '{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
        )
    # Count every cycle whose range is fully contained in
    # [initial cycle start, given cycle end] — closed bounds ('[]'), so
    # both endpoint cycles are included, making the count 1-indexed.
    billing_cycle_number = BillingCycle.objects.filter(
        date_range__contained_by=DateRange(
            self.initial_billing_cycle.date_range.lower,
            billing_cycle.date_range.upper,
            bounds='[]',
        ),
    ).count()
    return billing_cycle_number
def get_billing_cycles(self):
    """Billing cycles this cost spans, starting from its initial cycle.

    For one-off costs the queryset is capped at `total_billing_cycles`;
    for ordinary costs the attribute is None and the slice is unbounded.
    """
    cycles = BillingCycle.objects.filter(
        start_date__gte=self.initial_billing_cycle.date_range.lower
    )
    return cycles[:self.total_billing_cycles]
def can_delete(self):
    """Deletable only while no transactions have been created from this cost."""
    has_transactions = self.transactions.exists()
    return not has_transactions
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.enact | python | def enact(self, billing_cycle, disable_if_done=True):
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle) | Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L166-L194 | [
"def is_enactable(self, as_of):\n \"\"\"Can this RecurringCost be enacted\"\"\"\n return \\\n not self.disabled and \\\n not self.archived and \\\n not self._is_finished(as_of) and \\\n self._is_ready(as_of) and \\\n not self._is_billing_complete()\n"
] | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.disable_if_done | python | def disable_if_done(self, commit=True):
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save() | Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L196-L205 | [
"def _is_billing_complete(self):\n \"\"\"Has the specified `fixed_amount` been billed?\n\n If so, we should not be enacting this RecurringCost.\n \"\"\"\n if self.is_one_off():\n return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)\n else:\n return False\n"
] | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
    """Enact this RecurringCost for the given billing cycle.

    This will:
     - Create a RecurredCost and the relevant Transactions & Transaction Legs
     - Mark this RecurringCost as disabled if this is its final billing cycle

    Raises:
        CannotEnactUnenactableRecurringCostError: cost is disabled, archived,
            finished, not yet ready, or fully billed.
        RecurringCostAlreadyEnactedForBillingCycle: a RecurredCost already
            exists for this cost and billing cycle.
    """
    as_of = billing_cycle.date_range.lower
    if not self.is_enactable(as_of):
        raise CannotEnactUnenactableRecurringCostError(
            "RecurringCost {} is unenactable.".format(self.uuid)
        )
    if self.has_enacted(billing_cycle):
        raise RecurringCostAlreadyEnactedForBillingCycle(
            'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
        )
    # Create the RecurredCost and its transactions atomically so a failure
    # in make_transaction() cannot leave a half-enacted cost behind.
    with db_transaction.atomic():
        recurred_cost = RecurredCost(
            recurring_cost=self,
            billing_cycle=billing_cycle,
        )
        recurred_cost.make_transaction()
        recurred_cost.save()
        if disable_if_done:
            # BUG FIX: previously called self.disable_if_done(billing_cycle),
            # passing the BillingCycle as the `commit` flag (signature is
            # disable_if_done(self, commit=True)). That only worked because
            # the object is truthy. Call with the default commit=True.
            self.disable_if_done()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.is_enactable | python | def is_enactable(self, as_of):
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete() | Can this RecurringCost be enacted | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L217-L224 | [
"def _is_finished(self, as_of):\n \"\"\"Have the specified number of billing cycles been completed?\n\n If so, we should not be enacting this RecurringCost.\n \"\"\"\n if self.is_one_off():\n last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]\n return last_billin... | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost.has_enacted | python | def has_enacted(self, billing_cycle):
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists() | Has this recurring cost already enacted transactions for given billing cycle? | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L226-L231 | null | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_ready | python | def _is_ready(self, as_of):
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True | Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date): | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L236-L248 | null | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_finished | python | def _is_finished(self, as_of):
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False | Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L250-L259 | [
"def is_one_off(self):\n return bool(self.total_billing_cycles)\n"
] | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._is_billing_complete | python | def _is_billing_complete(self):
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False | Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L261-L269 | [
"def is_one_off(self):\n return bool(self.total_billing_cycles)\n"
] | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _get_billing_cycle_number(self, billing_cycle):
"""Gets the 1-indexed number of the billing cycle relative to the provided billing cycle"""
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCost._get_billing_cycle_number | python | def _get_billing_cycle_number(self, billing_cycle):
begins_before_initial_date = billing_cycle.date_range.lower < self.initial_billing_cycle.date_range.lower
if begins_before_initial_date:
raise ProvidedBillingCycleBeginsBeforeInitialBillingCycle(
'{} precedes initial cycle {}'.format(billing_cycle, self.initial_billing_cycle)
)
billing_cycle_number = BillingCycle.objects.filter(
date_range__contained_by=DateRange(
self.initial_billing_cycle.date_range.lower,
billing_cycle.date_range.upper,
bounds='[]',
),
).count()
return billing_cycle_number | Gets the 1-indexed number of the billing cycle relative to the provided billing cycle | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L271-L287 | null | class RecurringCost(models.Model):
""" Represents recurring costs and one-off costs
Recurring costs recur indefinitely, or until `disabled` is set to `True`.
One-off costs have a value for `total_billing_cycles` set. This value indicates
how many billing cycles the one-off cost should be spread. After this point
the recurring cost will be disabled.
Additionally, the type field indicates how the cost should calculate the amount
to be billed.
A note on 'enacting': We use the term 'enact' to refer to the creation of a
definite RecurredCost from the more conceptual RecurringCost. This RecurredCost
is the creator - and link to - the actual transactions created for the cost in a
given billing cycle.
"""
TYPES = Choices(
(
'normal',
"We will not have spent this yet. We will estimate "
"a fixed amount per billing cycle. (You should select a "
"'liabilities' account)."
),
(
'arrears_balance',
"We will have already spent this in the previous billing "
"cycle, so bill the account's balance. (You should select "
"an 'expenses' account)"
),
(
'arrears_transactions',
"We will have already spent this in the previous cycle, "
"so bill the total amount spent in the previous cycle. "
"(You should select an 'expenses' account)"
),
)
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
from_accounts = models.ManyToManyField('hordak.Account',
through='costs.RecurringCostSplit',
related_name='outbound_costs')
to_account = models.ForeignKey('hordak.Account', related_name='inbound_costs')
#: The disabled flag is mostly present for the benefit of the database checks & triggers.
#: We could infer the disabled state from other values (billed amount, number of billing
#: periods billed, etc), but checking this would make our triggers rather complex.
disabled = models.BooleanField(default=False)
archived = models.BooleanField(default=False)
# The amount to be billed per cycle for recurring costs, or the amount to spread
# across cycles for one-off costs
fixed_amount = models.DecimalField(max_digits=13, decimal_places=2, null=True, blank=True)
total_billing_cycles = models.PositiveIntegerField(default=None, null=True, blank=True,
help_text='Stop billing after this many billing cycles.')
type = models.CharField(max_length=20, choices=TYPES, default=TYPES.normal)
initial_billing_cycle = models.ForeignKey('billing_cycle.BillingCycle')
transactions = models.ManyToManyField(Transaction, through='costs.RecurredCost')
objects = models.Manager.from_queryset(RecurringCostQuerySet)()
@property
def currency(self):
# This is a simplification, but probably ok for now as swiftwind probably won't
# need to deal with multiple currencies given its target audience
return self.to_account.currencies[0]
def get_amount(self, billing_cycle):
amount = {
RecurringCost.TYPES.normal: self.get_amount_normal,
RecurringCost.TYPES.arrears_balance: self.get_amount_arrears_balance,
RecurringCost.TYPES.arrears_transactions: self.get_amount_arrears_transactions,
}[self.type](billing_cycle)
if isinstance(amount, Balance):
# Convert balances into decimals
monies = amount.monies()
assert len(monies) in (0, 1)
amount = monies[0].amount if amount else Decimal('0')
return amount
def get_amount_normal(self, billing_cycle):
"""Get the amount due on the given billing cycle
For regular recurring costs this is simply `fixed_amount`. For
one-off costs this is the portion of `fixed_amount` for the given
billing_cycle.
"""
if self.is_one_off():
billing_cycle_number = self._get_billing_cycle_number(billing_cycle)
if billing_cycle_number > self.total_billing_cycles:
# A future billing cycle after this one has ended
return Decimal('0')
else:
# This is a current cycle. Split the amount into
# equal parts then return the part for this cycle
splits = ratio_split(
amount=self.fixed_amount,
ratios=[Decimal('1')] * self.total_billing_cycles,
)
return splits[billing_cycle_number - 1]
else:
# This is a none-one-off recurring cost, so the logic is simple
return self.fixed_amount
def get_amount_arrears_balance(self, billing_cycle):
"""Get the balance of to_account at the end of billing_cycle"""
return self.to_account.balance(
transaction__date__lt=billing_cycle.date_range.lower,
)
def get_amount_arrears_transactions(self, billing_cycle):
"""Get the sum of all transaction legs in to_account during given billing cycle"""
previous_billing_cycle = billing_cycle.get_previous()
if not previous_billing_cycle:
return Decimal(0)
return self.to_account.balance(
transaction__date__lt=previous_billing_cycle.date_range.upper,
transaction__date__gte=previous_billing_cycle.date_range.lower,
)
def get_billed_amount(self):
"""Get the total amount billed so far"""
return Leg.objects.filter(transaction__recurred_cost__recurring_cost=self, amount__gt=0).sum_to_balance()
def enact(self, billing_cycle, disable_if_done=True):
"""Enact this RecurringCost for the given billing cycle
This will:
- Create a RecurredCost and the relevant Transactions & Transaction Legs
- Mark this RecurringCost as disabled if this is its final billing cycle
"""
as_of = billing_cycle.date_range.lower
if not self.is_enactable(as_of):
raise CannotEnactUnenactableRecurringCostError(
"RecurringCost {} is unenactable.".format(self.uuid)
)
if self.has_enacted(billing_cycle):
raise RecurringCostAlreadyEnactedForBillingCycle(
'RecurringCost cost {} already enacted for {}'.format(self, billing_cycle)
)
with db_transaction.atomic():
recurred_cost = RecurredCost(
recurring_cost=self,
billing_cycle=billing_cycle,
)
recurred_cost.make_transaction()
recurred_cost.save()
if disable_if_done:
self.disable_if_done(billing_cycle)
def disable_if_done(self, commit=True):
"""Set disabled=True if we have billed all we need to
Will only have an effect on one-off costs.
"""
if self._is_billing_complete() and not self.disabled:
self.disabled = True
if commit:
self.save()
def archive(self, commit=True):
self.archived = True
if commit:
self.save()
def unarchive(self, commit=True):
self.archived = False
if commit:
self.save()
def is_enactable(self, as_of):
"""Can this RecurringCost be enacted"""
return \
not self.disabled and \
not self.archived and \
not self._is_finished(as_of) and \
self._is_ready(as_of) and \
not self._is_billing_complete()
def has_enacted(self, billing_cycle):
"""Has this recurring cost already enacted transactions for given billing cycle?"""
return RecurredCost.objects.filter(
recurring_cost=self,
billing_cycle=billing_cycle,
).exists()
def is_one_off(self):
return bool(self.total_billing_cycles)
def _is_ready(self, as_of):
"""Is the RecurringCost ready to be enacted as of the date `as_of`
This determines if `as_of` precedes the start of `initial_billing_cycle`. If so,
we should not be enacting this RecurringCost yet.
Args:
as_of (Date):
"""
if self.is_one_off():
return self.initial_billing_cycle.date_range.lower <= as_of
else:
return True
def _is_finished(self, as_of):
"""Have the specified number of billing cycles been completed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]
return last_billing_cycle.date_range.upper <= as_of
else:
return False
def _is_billing_complete(self):
"""Has the specified `fixed_amount` been billed?
If so, we should not be enacting this RecurringCost.
"""
if self.is_one_off():
return self.get_billed_amount() >= Balance(self.fixed_amount, self.currency)
else:
return False
def get_billing_cycles(self):
return BillingCycle.objects.filter(
start_date__gte=self.initial_billing_cycle.date_range.lower
)[:self.total_billing_cycles]
def can_delete(self):
return not self.transactions.exists()
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurringCostSplitQuerySet.split | python | def split(self, amount):
split_objs = list(self.all())
if not split_objs:
raise NoSplitsFoundForRecurringCost()
portions = [split_obj.portion for split_obj in split_objs]
split_amounts = ratio_split(amount, portions)
return [
(split_objs[i], split_amount)
for i, split_amount
in enumerate(split_amounts)
] | Split the value given by amount according to the RecurringCostSplit's portions
Args:
amount (Decimal):
Returns:
list[(RecurringCostSplit, Decimal)]: A list with elements in the form (RecurringCostSplit, Decimal) | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L300-L320 | null | class RecurringCostSplitQuerySet(QuerySet):
|
adamcharnock/swiftwind | swiftwind/costs/models.py | RecurredCost.make_transaction | python | def make_transaction(self):
if self.pk:
raise CannotRecreateTransactionOnRecurredCost(
'The transaction for this recurred cost has already been created. You cannot create it again.'
)
amount = self.recurring_cost.get_amount(self.billing_cycle)
# It is quite possible that there will be nothing to bill, in which
# case we cannot create a transaction with no legs, nor can we create
# legs with zero values. Therefore we don't create any transaction.
if not amount:
return None
self.transaction = Transaction.objects.create(
description='Created by recurring cost',
date=self.billing_cycle.date_range.lower
)
# Use the SplitManager's custom queryset's split() method to get the
# amount to be billed for each split
splits = self.recurring_cost.splits.all().split(amount)
# Create the transaction leg for the outbound funds
# (normally to an expense account)
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(amount, self.recurring_cost.currency),
account=self.recurring_cost.to_account,
))
for split, split_amount in splits:
# Create the transaction legs for the inbound funds
# (from housemate accounts)
if split_amount:
self.transaction.legs.add(Leg.objects.create(
transaction=self.transaction,
amount=Money(split_amount * -1, self.recurring_cost.currency),
account=split.from_account,
))
return self.transaction | Create the transaction for this RecurredCost
May only be used to create the RecurredCost's initial transaction.
Returns:
Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero. | train | https://github.com/adamcharnock/swiftwind/blob/72c715800841c3b2feabded3f3b65b76388b4cea/swiftwind/costs/models.py#L357-L405 | null | class RecurredCost(models.Model):
"""A record of a recurring cost which has been enacted.
Links together the RecurringCost, BillingCycle, and Transaction.
"""
uuid = SmallUUIDField(default=uuid_default(), editable=False)
timestamp = models.DateTimeField(default=timezone.now, editable=False)
recurring_cost = models.ForeignKey(RecurringCost, related_name='recurrences')
billing_cycle = models.ForeignKey('billing_cycle.BillingCycle', related_name='recurring_costs')
transaction = models.OneToOneField(Transaction, related_name='recurred_cost', unique=True, null=True,
on_delete=models.PROTECT)
class Meta:
unique_together = (
# A RecurringCost should only be enacted once per billing cycle
('recurring_cost', 'billing_cycle'),
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.