repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
prthkms/alex | alex/preprocess.py | QueryMatcher.query | python | def query(self, query):
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip() | Q.query(query string) -> category string -- return the matched
category for any user query | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L149-L156 | [
"def match_query_to_corpus(self):\n\t\"\"\"Q.match_query_to_corpus() -> index -- return the matched corpus \n\tindex of the user query \n\t\"\"\"\n\tranking = []\n\tfor i,doc in enumerate(self.processed_corpus):\n\t\trank = 0.0\n\t\tfor word in self.processed_query:\n\t\t\tif word in doc:\n\t\t\t\trank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]\n\t\tranking.append((rank,i))\n\tmatching_corpus_index = 0\n\tmax_rank = 0\n\tfor rank,index in ranking:\n\t\tif rank > max_rank:\n\t\t\tmatching_corpus_index = index\n\t\t\tmax_rank = rank\n\treturn matching_corpus_index\n",
"def process_query(self):\n\t\"\"\"Q.process_query() -- processes the user query, \n\tby tokenizing and stemming words.\n\t\"\"\"\n\tself.query = wt(self.query)\n\tself.processed_query = []\n\tfor word in self.query:\n\t\tif word not in self.stop_words and word not in self.punctuation:\n\t\t\tself.processed_query.append(self.stemmer.stem(word))\n"
] | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
super(QueryMatcher, self).__init__()
self.initialize()
def calculate_inverse_document_frequencies(self):
"""Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient.
"""
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
corpus : contains a list of sentences, each of which acts as
a document
category : contains a category of each sentence in the corpus.
stemmer : imported from the nltk library, used for reducing
words to their root form.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def match_query_to_corpus(self):
"""Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
"""
ranking = []
for i,doc in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]
ranking.append((rank,i))
matching_corpus_index = 0
max_rank = 0
for rank,index in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank
return matching_corpus_index
def process_corpus(self):
"""Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
"""
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence)
def process_query(self):
"""Q.process_query() -- processes the user query,
by tokenizing and stemming words.
"""
self.query = wt(self.query)
self.processed_query = []
for word in self.query:
if word not in self.stop_words and word not in self.punctuation:
self.processed_query.append(self.stemmer.stem(word))
|
prthkms/alex | alex/web.py | weather | python | def weather(query):
print 'Identifying the location . . .'
try:
response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner",
headers={
"X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP",
"Content-Type": "application/x-www-form-urlencoded"
},
params={
"text": query
}
)
except:
print 'Unable to connect to internet'
return
location = ''
for entity in response.body['result'].split():
word,tag = entity.split('/')
if(tag == 'LOCATION'):
location += ' '+word
if(location != ''):
print 'Gathering weather information for'+location
import urllib2, urllib, json
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = "select * from weather.forecast where woeid in \
(select woeid from geo.places(1) where text=\""+location+"\")"
yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json"
try:
result = urllib2.urlopen(yql_url).read()
data = json.loads(result)
result = data['query']['results']['channel']
print result['location']['city']+' '+result['location']['country']+' '+result['location']['region']
print result['item']['condition']['date']
print result['item']['condition']['text']
print result['item']['condition']['temp']+' '+result['units']['temperature']
except:
print 'Unable to connect to internet'
else:
print 'Unable to get the location.' | weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to
determine location entity in query and fetch weather info for that location
(using yahoo apis). | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/web.py#L4-L47 | null | import duckduckgo
import unirest
def generic(query):
""" generic(query) -- process a generic user query using the Stanford
NLTK NER and duckduckgo api.
"""
try:
response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner",
headers={
"X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP",
"Content-Type": "application/x-www-form-urlencoded"
},
params={
"text": query
}
)
except:
print 'Unable to connect to internet'
return
web_query = ''
for entity in response.body['result'].split():
word,tag = entity.split('/')
if(tag != 'O'):
web_query += ' '+word
if(web_query != ''):
web_query = web_query.strip().split()
duckduckgo.query(web_query)
else:
print 'I do not know how to process this query at this moment.'
|
prthkms/alex | alex/web.py | generic | python | def generic(query):
try:
response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner",
headers={
"X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP",
"Content-Type": "application/x-www-form-urlencoded"
},
params={
"text": query
}
)
except:
print 'Unable to connect to internet'
return
web_query = ''
for entity in response.body['result'].split():
word,tag = entity.split('/')
if(tag != 'O'):
web_query += ' '+word
if(web_query != ''):
web_query = web_query.strip().split()
duckduckgo.query(web_query)
else:
print 'I do not know how to process this query at this moment.' | generic(query) -- process a generic user query using the Stanford
NLTK NER and duckduckgo api. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/web.py#L50-L77 | [
"def query(string):\n\t\"\"\"query(user string) -- make http request to duckduckgo api, to get result\n\tin json format, then call parse_result.\n\t\"\"\"\n\turl = \"https://api.duckduckgo.com/?q=\"\n\tformating = \"&format=json\"\n\tquery_string = url+'+'.join(string)+formating\n\ttry:\n\t\tresult = json.loads(requests.get(query_string).text)\n\texcept:\n\t\tprint \"I'm sorry! Something went wrong. Maybe we could try again later.\"\n\t\treturn\n\tparse_result(result)\n"
] | import duckduckgo
import unirest
def weather(query):
"""weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to
determine location entity in query and fetch weather info for that location
(using yahoo apis).
"""
print 'Identifying the location . . .'
try:
response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner",
headers={
"X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP",
"Content-Type": "application/x-www-form-urlencoded"
},
params={
"text": query
}
)
except:
print 'Unable to connect to internet'
return
location = ''
for entity in response.body['result'].split():
word,tag = entity.split('/')
if(tag == 'LOCATION'):
location += ' '+word
if(location != ''):
print 'Gathering weather information for'+location
import urllib2, urllib, json
baseurl = "https://query.yahooapis.com/v1/public/yql?"
yql_query = "select * from weather.forecast where woeid in \
(select woeid from geo.places(1) where text=\""+location+"\")"
yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json"
try:
result = urllib2.urlopen(yql_url).read()
data = json.loads(result)
result = data['query']['results']['channel']
print result['location']['city']+' '+result['location']['country']+' '+result['location']['region']
print result['item']['condition']['date']
print result['item']['condition']['text']
print result['item']['condition']['temp']+' '+result['units']['temperature']
except:
print 'Unable to connect to internet'
else:
print 'Unable to get the location.'
def generic(query):
""" generic(query) -- process a generic user query using the Stanford
NLTK NER and duckduckgo api.
"""
try:
response = unirest.post("https://textanalysis.p.mashape.com/nltk-stanford-ner",
headers={
"X-Mashape-Key": "E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP",
"Content-Type": "application/x-www-form-urlencoded"
},
params={
"text": query
}
)
except:
print 'Unable to connect to internet'
return
web_query = ''
for entity in response.body['result'].split():
word,tag = entity.split('/')
if(tag != 'O'):
web_query += ' '+word
if(web_query != ''):
web_query = web_query.strip().split()
duckduckgo.query(web_query)
else:
print 'I do not know how to process this query at this moment.'
|
prthkms/alex | alex/handler.py | lines | python | def lines(query):
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename | lines(query) -- print the number of lines in a given file | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L8-L16 | [
"def get_file_name(query):\n\t\"\"\"get_file_name(query) -> filename -- return the filename found in a\n\tgiven, found by matching a regular expression.\n\t\"\"\"\n\tmatch = re.search(r'\\S*\\.[\\d\\w]{1,4}', query)\n\tif(match):\n\t\tfilename = match.group()\n\t\treturn filename\n\telse:\n\t\tstart = match.start()\n\t\tend = match.end()\n\t\tspaces = re.finditer(r' ', query)\n\t\tspace_index = []\n\t\tfor space in spaces:\n\t\t\tspace_index.append(space.start())\n\t\tspace_index.pop()\n\t\tfor i in space_index:\n\t\t\tfilename = query[i+1:end]\n\t\t\tif(os.path.isfile(filename)):\n\t\t\t\treturn filename\n\t\treturn None\n"
] | import re
import os
import support
import pwd
import time
import subprocess
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
def file_info(query):
"""file_info(query) -- print some human readable information of a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found'
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
if(len(keyword) > 0):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def add_to_path(query):
""" add_to_path(query) -- add user given path to environment PATH variable.
"""
new_entry = support.get_path(query)
if(new_entry):
print 'Adding '+new_entry+' to PATH variable.'
print '''1 : confirm
2 : cancel
'''
choice = int(raw_input('>> '))
if(choice == 1):
home_dir = os.path.expanduser('~')
bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
bashrc.close()
os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
print 'Success!!'
print os.system('echo $PATH')
else:
print 'We were unable to extract the \'path\' from your query.'
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), |
prthkms/alex | alex/handler.py | words | python | def words(query):
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename | lines(query) -- print the number of words in a given file | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L18-L26 | [
"def get_file_name(query):\n\t\"\"\"get_file_name(query) -> filename -- return the filename found in a\n\tgiven, found by matching a regular expression.\n\t\"\"\"\n\tmatch = re.search(r'\\S*\\.[\\d\\w]{1,4}', query)\n\tif(match):\n\t\tfilename = match.group()\n\t\treturn filename\n\telse:\n\t\tstart = match.start()\n\t\tend = match.end()\n\t\tspaces = re.finditer(r' ', query)\n\t\tspace_index = []\n\t\tfor space in spaces:\n\t\t\tspace_index.append(space.start())\n\t\tspace_index.pop()\n\t\tfor i in space_index:\n\t\t\tfilename = query[i+1:end]\n\t\t\tif(os.path.isfile(filename)):\n\t\t\t\treturn filename\n\t\treturn None\n"
] | import re
import os
import support
import pwd
import time
import subprocess
def lines(query):
"""lines(query) -- print the number of lines in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename
def file_info(query):
"""file_info(query) -- print some human readable information of a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found'
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
if(len(keyword) > 0):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def add_to_path(query):
""" add_to_path(query) -- add user given path to environment PATH variable.
"""
new_entry = support.get_path(query)
if(new_entry):
print 'Adding '+new_entry+' to PATH variable.'
print '''1 : confirm
2 : cancel
'''
choice = int(raw_input('>> '))
if(choice == 1):
home_dir = os.path.expanduser('~')
bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
bashrc.close()
os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
print 'Success!!'
print os.system('echo $PATH')
else:
print 'We were unable to extract the \'path\' from your query.'
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), |
prthkms/alex | alex/handler.py | file_info | python | def file_info(query):
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found' | file_info(query) -- print some human readable information of a given file | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L28-L41 | [
"def get_file_name(query):\n\t\"\"\"get_file_name(query) -> filename -- return the filename found in a\n\tgiven, found by matching a regular expression.\n\t\"\"\"\n\tmatch = re.search(r'\\S*\\.[\\d\\w]{1,4}', query)\n\tif(match):\n\t\tfilename = match.group()\n\t\treturn filename\n\telse:\n\t\tstart = match.start()\n\t\tend = match.end()\n\t\tspaces = re.finditer(r' ', query)\n\t\tspace_index = []\n\t\tfor space in spaces:\n\t\t\tspace_index.append(space.start())\n\t\tspace_index.pop()\n\t\tfor i in space_index:\n\t\t\tfilename = query[i+1:end]\n\t\t\tif(os.path.isfile(filename)):\n\t\t\t\treturn filename\n\t\treturn None\n",
"def get_readable_filesize(size):\n\t\"\"\"get_readable_filesize(size) -> filesize -- return human readable \n\tfilesize from given size in bytes.\n\t\"\"\"\n\tif(size < 1024):\n\t\treturn str(size)+' bytes'\n\ttemp = size/1024.0\n\tlevel = 1\n\twhile(temp >= 1024 and level< 3):\n\t\ttemp = temp/1024\n\t\tlevel += 1\n\tif(level == 1):\n\t\treturn str(round(temp,2))+' KB'\n\telif(level == 2):\n\t\treturn str(round(temp,2))+' MB'\n\telse:\n\t\treturn str(round(temp,2))+' GB'"
] | import re
import os
import support
import pwd
import time
import subprocess
def lines(query):
"""lines(query) -- print the number of lines in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
if(len(keyword) > 0):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def add_to_path(query):
""" add_to_path(query) -- add user given path to environment PATH variable.
"""
new_entry = support.get_path(query)
if(new_entry):
print 'Adding '+new_entry+' to PATH variable.'
print '''1 : confirm
2 : cancel
'''
choice = int(raw_input('>> '))
if(choice == 1):
home_dir = os.path.expanduser('~')
bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
bashrc.close()
os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
print 'Success!!'
print os.system('echo $PATH')
else:
print 'We were unable to extract the \'path\' from your query.'
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), |
prthkms/alex | alex/handler.py | make_executable | python | def make_executable(query):
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found' | make_executable(query) -- give executable permissions to a given file | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L43-L50 | [
"def get_file_name(query):\n\t\"\"\"get_file_name(query) -> filename -- return the filename found in a\n\tgiven, found by matching a regular expression.\n\t\"\"\"\n\tmatch = re.search(r'\\S*\\.[\\d\\w]{1,4}', query)\n\tif(match):\n\t\tfilename = match.group()\n\t\treturn filename\n\telse:\n\t\tstart = match.start()\n\t\tend = match.end()\n\t\tspaces = re.finditer(r' ', query)\n\t\tspace_index = []\n\t\tfor space in spaces:\n\t\t\tspace_index.append(space.start())\n\t\tspace_index.pop()\n\t\tfor i in space_index:\n\t\t\tfilename = query[i+1:end]\n\t\t\tif(os.path.isfile(filename)):\n\t\t\t\treturn filename\n\t\treturn None\n"
] | import re
import os
import support
import pwd
import time
import subprocess
def lines(query):
"""lines(query) -- print the number of lines in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
def file_info(query):
"""file_info(query) -- print some human readable information of a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
	"""search_new(query) -- like search(), but keyword search always runs
	from the current working directory instead of prompting for a root.
	"""
	print '''I\'m a little confused. Please enter a choice
	1 : Search for file by its name
	2 : Search for files which contain a keyword
	'''
	try:
		choice = int(raw_input('>> '))
		if(choice == 1):
			# Extract the filename from the query; locate -b matches basenames.
			filename = support.get_file_name(query)
			if(filename):
				os.system('locate -b \'\\'+filename+'\'')
			else:
				print 'not able to get the filename'
		elif(choice == 2):
			keyword = raw_input('Enter keyword : ')
			if(len(keyword) > 0):
				# NOTE(review): keyword goes into the shell command unescaped.
				os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
			else:
				print 'Invalid input'
		else:
			print 'Invalid input'
			return
	except:
		# Bare except mainly guards the int() conversion of the menu input.
		print 'Something went wrong. Most likely its an input error. Please try again'
def add_to_path(query):
	""" add_to_path(query) -- add user given path to environment PATH variable.
	"""
	new_entry = support.get_path(query)
	if(new_entry):
		print 'Adding '+new_entry+' to PATH variable.'
		print '''1 : confirm
		2 : cancel
		'''
		choice = int(raw_input('>> '))
		if(choice == 1):
			# Append an export line to ~/.bashrc so the change persists
			# for future interactive shells.
			home_dir = os.path.expanduser('~')
			bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
			bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
			bashrc.close()
			# NOTE(review): 'source' runs in a throwaway subshell here, so it
			# cannot change this process's environment; the new PATH only
			# takes effect in shells started after this.
			os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
			print 'Success!!'
			# NOTE(review): this echoes the *current* PATH via a fresh shell,
			# which will not yet include the new entry (see note above).
			print os.system('echo $PATH')
	else:
		print 'We were unable to extract the \'path\' from your query.'
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), |
prthkms/alex | alex/handler.py | add_to_path | python | def add_to_path(query):
new_entry = support.get_path(query)
if(new_entry):
print 'Adding '+new_entry+' to PATH variable.'
print '''1 : confirm
2 : cancel
'''
choice = int(raw_input('>> '))
if(choice == 1):
home_dir = os.path.expanduser('~')
bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
bashrc.close()
os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
print 'Success!!'
print os.system('echo $PATH')
else:
print 'We were unable to extract the \'path\' from your query.' | add_to_path(query) -- add user given path to environment PATH variable. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L110-L129 | [
"def get_path(query):\n\t\"\"\"get_path(query) -> pathname -- return the path found in a\n\tgiven, found by matching a regular expression.\n\t\"\"\"\n\tmatch = re.search(r'/(.*/)+(\\S*(\\.[\\d\\w]{1,4})?)', query)\n\tif(os.path.isfile(match.group()) or os.path.isdir(match.group())):\n\t\treturn match.group()\n\telse:\n\t\treturn None\n"
] | import re
import os
import support
import pwd
import time
import subprocess
def lines(query):
"""lines(query) -- print the number of lines in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
def file_info(query):
"""file_info(query) -- print some human readable information of a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found'
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
if(len(keyword) > 0):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def system_info(query):
"""system_info(query) -- print system specific information like OS, kernel,
architecture etc.
"""
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), |
prthkms/alex | alex/handler.py | system_info | python | def system_info(query):
proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "operating system : "+str(out),
proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel : "+str(out),
proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "kernel release : "+str(out),
proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "architecture : "+str(out),
proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print "network node name : "+str(out), | system_info(query) -- print system specific information like OS, kernel,
architecture etc. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/handler.py#L131-L153 | null | import re
import os
import support
import pwd
import time
import subprocess
def lines(query):
"""lines(query) -- print the number of lines in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.readlines())
else:
print 'File not found : ' + filename
def words(query):
"""lines(query) -- print the number of words in a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
with open(filename) as openfile:
print len(openfile.read().split())
else:
print 'File not found : ' + filename
def file_info(query):
"""file_info(query) -- print some human readable information of a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
stat_info = os.stat(filename)
owner_name = pwd.getpwuid(stat_info.st_uid).pw_name
print 'owner : ' + owner_name
file_size = support.get_readable_filesize(stat_info.st_size)
print 'size : ' + file_size
print 'created : ' + time.ctime(stat_info.st_ctime)
print 'last modified : ' + time.ctime(stat_info.st_mtime)
else:
print 'file not found'
def make_executable(query):
"""make_executable(query) -- give executable permissions to a given file
"""
filename = support.get_file_name(query)
if(os.path.isfile(filename)):
os.system('chmod +x '+filename)
else:
print 'file not found'
def search(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
print '''By default I\'ll start searching from HOME directory.\
But this usually takes time.
1 : Search from HOME directory
2 : Search from current directory
'''
location = int(raw_input('>> '))
if(location == 1):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.expanduser('~'))
elif(location == 2):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def search_new(query):
print '''I\'m a little confused. Please enter a choice
1 : Search for file by its name
2 : Search for files which contain a keyword
'''
try:
choice = int(raw_input('>> '))
if(choice == 1):
filename = support.get_file_name(query)
if(filename):
os.system('locate -b \'\\'+filename+'\'')
else:
print 'not able to get the filename'
elif(choice == 2):
keyword = raw_input('Enter keyword : ')
if(len(keyword) > 0):
os.system('grep -i -n -r \''+keyword+'\' '+os.path.abspath(os.curdir))
else:
print 'Invalid input'
else:
print 'Invalid input'
return
except:
print 'Something went wrong. Most likely its an input error. Please try again'
def add_to_path(query):
""" add_to_path(query) -- add user given path to environment PATH variable.
"""
new_entry = support.get_path(query)
if(new_entry):
print 'Adding '+new_entry+' to PATH variable.'
print '''1 : confirm
2 : cancel
'''
choice = int(raw_input('>> '))
if(choice == 1):
home_dir = os.path.expanduser('~')
bashrc = open(os.path.join(home_dir, ".bashrc"), "a")
bashrc.write('\n\nexport PATH=\"'+new_entry+':$PATH\"\n')
bashrc.close()
os.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))
print 'Success!!'
print os.system('echo $PATH')
else:
print 'We were unable to extract the \'path\' from your query.'
|
prthkms/alex | alex/duckduckgo.py | parse_result | python | def parse_result(result):
if(result['Type'] == 'D'):
print """There is more than one answer for this. Try making your query\
more specific. For example, if you want to learn about apple the company\
and not apple the fruit, try something like apple inc or apple computers.
"""
elif(result['Type'] == 'A'):
print result['AbstractText']
print '\nResults from DuckDuckGo'
elif(result['Type'] == 'C'):
for entry in result['RelatedTopics']:
print entry['Text']
print "\n"
else:
print "I do not know how to process this query at the moment." | parse_result(json result) -- print the web query according to the type
of result from duckduckgo. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/duckduckgo.py#L8-L28 | null | import json
import requests
# example queries:
# duckduckgo.query("Mahatma Gandhi")
# duckduckgo.query("friends characters")
def parse_result(result):
	"""parse_result(json result) -- print the web query according to the type
	of result from duckduckgo.
	"""
	# 'Type' is DuckDuckGo's answer-category code. Judging by the branches
	# below: D = disambiguation, A = abstract/answer, C = category listing.
	# TODO(review): confirm against the Instant Answer API documentation.
	if(result['Type'] == 'D'):
		print """There is more than one answer for this. Try making your query\
		more specific. For example, if you want to learn about apple the company\
		and not apple the fruit, try something like apple inc or apple computers. 
		"""
	elif(result['Type'] == 'A'):
		# Direct answer: print the abstract text itself.
		print result['AbstractText']
		print '\nResults from DuckDuckGo'
	elif(result['Type'] == 'C'):
		# Category result: print the text of every related topic.
		for entry in result['RelatedTopics']:
			print entry['Text']
			print "\n"
	else:
		print "I do not know how to process this query at the moment."
def query(string):
"""query(user string) -- make http request to duckduckgo api, to get result
in json format, then call parse_result.
"""
url = "https://api.duckduckgo.com/?q="
formating = "&format=json"
query_string = url+'+'.join(string)+formating
try:
result = json.loads(requests.get(query_string).text)
except:
print "I'm sorry! Something went wrong. Maybe we could try again later."
return
parse_result(result)
# CLI entry point: forward the command-line words (if any) to query().
if __name__ == '__main__':
	import sys
	if(len(sys.argv) > 1):
		query(sys.argv[1:])
prthkms/alex | alex/duckduckgo.py | query | python | def query(string):
url = "https://api.duckduckgo.com/?q="
formating = "&format=json"
query_string = url+'+'.join(string)+formating
try:
result = json.loads(requests.get(query_string).text)
except:
print "I'm sorry! Something went wrong. Maybe we could try again later."
return
parse_result(result) | query(user string) -- make http request to duckduckgo api, to get result
in json format, then call parse_result. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/duckduckgo.py#L30-L42 | [
"def parse_result(result):\n\t\"\"\"parse_result(json result) -- print the web query according to the type \n\tof result from duckduckgo.\n\t\"\"\"\n\n\tif(result['Type'] == 'D'):\n\t\tprint \"\"\"There is more than one answer for this. Try making your query\\\n\t\tmore specific. For example, if you want to learn about apple the company\\\n\t\tand not apple the fruit, try something like apple inc or apple computers. \n\t\t\"\"\"\n\n\telif(result['Type'] == 'A'):\n\t\tprint result['AbstractText']\n\t\tprint '\\nResults from DuckDuckGo'\n\n\telif(result['Type'] == 'C'):\n\t\tfor entry in result['RelatedTopics']:\n\t\t\tprint entry['Text']\n\t\t\tprint \"\\n\"\n\telse:\n\t\tprint \"I do not know how to process this query at the moment.\"\n"
] | import json
import requests
# example queries:
# duckduckgo.query("Mahatma Gandhi")
# duckduckgo.query("friends characters")
def parse_result(result):
"""parse_result(json result) -- print the web query according to the type
of result from duckduckgo.
"""
if(result['Type'] == 'D'):
print """There is more than one answer for this. Try making your query\
more specific. For example, if you want to learn about apple the company\
and not apple the fruit, try something like apple inc or apple computers.
"""
elif(result['Type'] == 'A'):
print result['AbstractText']
print '\nResults from DuckDuckGo'
elif(result['Type'] == 'C'):
for entry in result['RelatedTopics']:
print entry['Text']
print "\n"
else:
print "I do not know how to process this query at the moment."
if __name__ == '__main__':
import sys
if(len(sys.argv) > 1):
query(sys.argv[1:]) |
prthkms/alex | alex/support.py | assign_handler | python | def assign_handler(query, category):
if(category == 'count lines'):
handler.lines(query)
elif(category == 'count words'):
handler.words(query)
elif(category == 'weather'):
web.weather(query)
elif(category == 'no match'):
web.generic(query)
elif(category == 'file info'):
handler.file_info(query)
elif(category == 'executable'):
handler.make_executable(query)
elif(category == 'search'):
handler.search(query)
elif(category == 'path'):
handler.add_to_path(query)
elif(category == 'uname'):
handler.system_info(query)
else:
print 'I\'m not able to understand your query' | assign_handler(query, category) -- assign the user's query to a
particular category, and call the appropriate handler. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/support.py#L6-L29 | [
"def search(query):\n\tprint '''I\\'m a little confused. Please enter a choice\n\t1 : Search for file by its name\n\t2 : Search for files which contain a keyword\n\t'''\n\ttry:\n\t\tchoice = int(raw_input('>> '))\n\t\tif(choice == 1):\n\t\t\tfilename = support.get_file_name(query)\n\t\t\tif(filename):\n\t\t\t\tos.system('locate -b \\'\\\\'+filename+'\\'')\n\t\t\telse:\n\t\t\t\tprint 'not able to get the filename'\n\t\telif(choice == 2):\n\t\t\tkeyword = raw_input('Enter keyword : ')\n\t\t\tprint '''By default I\\'ll start searching from HOME directory.\\\n\t\t\tBut this usually takes time.\n\t\t\t1 : Search from HOME directory\n\t\t\t2 : Search from current directory\n\t\t\t'''\n\t\t\tlocation = int(raw_input('>> '))\n\t\t\tif(location == 1):\n\t\t\t\tos.system('grep -i -n -r \\''+keyword+'\\' '+os.path.expanduser('~'))\n\t\t\telif(location == 2):\n\t\t\t\tos.system('grep -i -n -r \\''+keyword+'\\' '+os.path.abspath(os.curdir))\n\t\t\telse:\n\t\t\t\tprint 'Invalid input'\n\t\telse:\n\t\t\tprint 'Invalid input'\n\t\t\treturn\n\texcept:\n\t\tprint 'Something went wrong. Most likely its an input error. Please try again'\n",
"def lines(query):\n\t\"\"\"lines(query) -- print the number of lines in a given file\n\t\"\"\"\n\tfilename = support.get_file_name(query)\n\tif(os.path.isfile(filename)):\n\t\twith open(filename) as openfile:\n\t\t\tprint len(openfile.readlines())\n\telse:\n\t\tprint 'File not found : ' + filename\n",
"def words(query):\n\t\"\"\"lines(query) -- print the number of words in a given file\n\t\"\"\"\n\tfilename = support.get_file_name(query)\n\tif(os.path.isfile(filename)):\n\t\twith open(filename) as openfile:\n\t\t\tprint len(openfile.read().split())\n\telse:\n\t\tprint 'File not found : ' + filename\n",
"def file_info(query):\n\t\"\"\"file_info(query) -- print some human readable information of a given file\n\t\"\"\"\n\tfilename = support.get_file_name(query)\n\tif(os.path.isfile(filename)):\n\t\tstat_info = os.stat(filename)\n\t\towner_name = pwd.getpwuid(stat_info.st_uid).pw_name\n\t\tprint 'owner : ' + owner_name\n\t\tfile_size = support.get_readable_filesize(stat_info.st_size)\n\t\tprint 'size : ' + file_size\n\t\tprint 'created : ' + time.ctime(stat_info.st_ctime)\n\t\tprint 'last modified : ' + time.ctime(stat_info.st_mtime)\n\telse:\n\t\tprint 'file not found'\n",
"def make_executable(query):\n\t\"\"\"make_executable(query) -- give executable permissions to a given file\n\t\"\"\"\n\tfilename = support.get_file_name(query)\n\tif(os.path.isfile(filename)):\n\t\tos.system('chmod +x '+filename)\n\telse:\n\t\tprint 'file not found'\n",
"def add_to_path(query):\n\t\"\"\" add_to_path(query) -- add user given path to environment PATH variable.\n\t\"\"\"\n\tnew_entry = support.get_path(query)\n\tif(new_entry):\n\t\tprint 'Adding '+new_entry+' to PATH variable.'\n\t\tprint '''1 : confirm\n\t\t2 : cancel\n\t\t'''\n\t\tchoice = int(raw_input('>> '))\n\t\tif(choice == 1):\n\t\t\thome_dir = os.path.expanduser('~')\n\t\t\tbashrc = open(os.path.join(home_dir, \".bashrc\"), \"a\")\n\t\t\tbashrc.write('\\n\\nexport PATH=\\\"'+new_entry+':$PATH\\\"\\n')\n\t\t\tbashrc.close()\n\t\t\tos.system('source '+os.path.join(os.path.expanduser('~'),'.bashrc'))\n\t\t\tprint 'Success!!'\n\t\t\tprint os.system('echo $PATH')\n\telse:\n\t\tprint 'We were unable to extract the \\'path\\' from your query.'\n",
"def system_info(query):\n\t\"\"\"system_info(query) -- print system specific information like OS, kernel,\n\tarchitecture etc.\n\t\"\"\"\n\tproc = subprocess.Popen([\"uname -o\"], stdout=subprocess.PIPE, shell=True)\n\t(out, err) = proc.communicate()\n\tprint \"operating system : \"+str(out),\n\n\tproc = subprocess.Popen([\"uname\"], stdout=subprocess.PIPE, shell=True)\n\t(out, err) = proc.communicate()\n\tprint \"kernel : \"+str(out),\n\n\tproc = subprocess.Popen([\"uname -r\"], stdout=subprocess.PIPE, shell=True)\n\t(out, err) = proc.communicate()\n\tprint \"kernel release : \"+str(out),\n\n\tproc = subprocess.Popen([\"uname -m\"], stdout=subprocess.PIPE, shell=True)\n\t(out, err) = proc.communicate()\n\tprint \"architecture : \"+str(out),\n\n\tproc = subprocess.Popen([\"uname -n\"], stdout=subprocess.PIPE, shell=True)\n\t(out, err) = proc.communicate()\n\tprint \"network node name : \"+str(out),",
"def weather(query):\n\t\"\"\"weather(query) -- use Name Entity Recogniser (nltk-stanford-ner), to \n\tdetermine location entity in query and fetch weather info for that location\n\t(using yahoo apis).\n\t\"\"\"\n\n\tprint 'Identifying the location . . .'\n\ttry:\n\t\tresponse = unirest.post(\"https://textanalysis.p.mashape.com/nltk-stanford-ner\",\n\t \t\theaders={\n\t \t\"X-Mashape-Key\": \"E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP\",\n\t \t\"Content-Type\": \"application/x-www-form-urlencoded\"\n\t \t\t\t},\n\t \t\tparams={\n\t \t\"text\": query\n\t \t\t}\n\t\t)\n\texcept:\n\t\tprint 'Unable to connect to internet'\n\t\treturn\n\tlocation = ''\n\tfor entity in response.body['result'].split():\n\t\tword,tag = entity.split('/')\n\t\tif(tag == 'LOCATION'):\n\t\t\tlocation += ' '+word\n\tif(location != ''):\n\t\tprint 'Gathering weather information for'+location\n\t\timport urllib2, urllib, json\n\t\tbaseurl = \"https://query.yahooapis.com/v1/public/yql?\"\n\t\tyql_query = \"select * from weather.forecast where woeid in \\\n\t\t(select woeid from geo.places(1) where text=\\\"\"+location+\"\\\")\"\n\t\tyql_url = baseurl + urllib.urlencode({'q':yql_query}) + \"&format=json\"\n\t\ttry:\n\t\t\tresult = urllib2.urlopen(yql_url).read()\n\t\t\tdata = json.loads(result)\n\t\t\tresult = data['query']['results']['channel']\n\t\t\tprint result['location']['city']+' '+result['location']['country']+' '+result['location']['region']\n\t\t\tprint result['item']['condition']['date']\n\t\t\tprint result['item']['condition']['text']\n\t\t\tprint result['item']['condition']['temp']+' '+result['units']['temperature'] \n\t\texcept:\n\t\t\tprint 'Unable to connect to internet'\n\telse:\n\t\tprint 'Unable to get the location.'\n",
"def generic(query):\n\t\"\"\" generic(query) -- process a generic user query using the Stanford \n\tNLTK NER and duckduckgo api.\n\t\"\"\"\n\n\ttry:\n\t\tresponse = unirest.post(\"https://textanalysis.p.mashape.com/nltk-stanford-ner\",\n\t \t\theaders={\n\t \t\"X-Mashape-Key\": \"E7WffsNDbNmshj4aVC4NUwj9dT9ep1S2cc3jsnFp5wSCzNBiaP\",\n\t \t\"Content-Type\": \"application/x-www-form-urlencoded\"\n\t \t\t\t},\n\t \t\tparams={\n\t \t\"text\": query\n\t \t\t}\n\t\t)\n\texcept:\n\t\tprint 'Unable to connect to internet'\n\t\treturn\n\tweb_query = ''\n\tfor entity in response.body['result'].split():\n\t\tword,tag = entity.split('/')\n\t\tif(tag != 'O'):\n\t\t\tweb_query += ' '+word\n\tif(web_query != ''):\n\t\tweb_query = web_query.strip().split()\n\t\tduckduckgo.query(web_query)\n\telse:\n\t\tprint 'I do not know how to process this query at this moment.'\n"
] | import handler
import web
import re
import os
def get_file_name(query):
	"""get_file_name(query) -> filename -- return the filename found in a
	given query, matched by a regular expression, or None when the query
	contains nothing that looks like a filename.
	"""
	# A "filename" is any non-space run ending in a dot plus a 1-4 char extension.
	match = re.search(r'\S*\.[\d\w]{1,4}', query)
	if(match):
		return match.group()
	# Bug fix: the old else-branch dereferenced match.start()/match.end() while
	# match was None, so every query without a filename raised AttributeError
	# (its space-splitting fallback was unreachable for the same reason).
	return None
def get_path(query):
	"""get_path(query) -> pathname -- return the path found in a given
	query, matched by a regular expression; None when the query contains
	no path naming an existing file or directory.
	"""
	# Matches an absolute path: '/', one or more '/'-terminated components,
	# then an optional final name with an optional 1-4 char extension.
	match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
	# Bug fix: guard against 'no match' -- the old code called match.group()
	# unconditionally and raised AttributeError for queries without a path.
	if(match and (os.path.isfile(match.group()) or os.path.isdir(match.group()))):
		return match.group()
	else:
		return None
def get_readable_filesize(size):
	"""get_readable_filesize(size) -> filesize -- return human readable
	filesize from given size in bytes.
	"""
	# Anything under one KB is reported in raw bytes.
	if(size < 1024):
		return str(size)+' bytes'
	# Scale into KB first, then keep dividing (capped at GB) while the
	# value is still at least 1024.
	scaled = size/1024.0
	unit_index = 1
	while(scaled >= 1024 and unit_index < 3):
		scaled = scaled/1024
		unit_index += 1
	suffix = {1: ' KB', 2: ' MB'}.get(unit_index, ' GB')
	return str(round(scaled, 2))+suffix
prthkms/alex | alex/support.py | get_file_name | python | def get_file_name(query):
match = re.search(r'\S*\.[\d\w]{1,4}', query)
if(match):
filename = match.group()
return filename
else:
start = match.start()
end = match.end()
spaces = re.finditer(r' ', query)
space_index = []
for space in spaces:
space_index.append(space.start())
space_index.pop()
for i in space_index:
filename = query[i+1:end]
if(os.path.isfile(filename)):
return filename
return None | get_file_name(query) -> filename -- return the filename found in a
given, found by matching a regular expression. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/support.py#L31-L51 | null | import handler
import web
import re
import os
def assign_handler(query, category):
"""assign_handler(query, category) -- assign the user's query to a
particular category, and call the appropriate handler.
"""
if(category == 'count lines'):
handler.lines(query)
elif(category == 'count words'):
handler.words(query)
elif(category == 'weather'):
web.weather(query)
elif(category == 'no match'):
web.generic(query)
elif(category == 'file info'):
handler.file_info(query)
elif(category == 'executable'):
handler.make_executable(query)
elif(category == 'search'):
handler.search(query)
elif(category == 'path'):
handler.add_to_path(query)
elif(category == 'uname'):
handler.system_info(query)
else:
print 'I\'m not able to understand your query'
def get_path(query):
"""get_path(query) -> pathname -- return the path found in a
given, found by matching a regular expression.
"""
match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
if(os.path.isfile(match.group()) or os.path.isdir(match.group())):
return match.group()
else:
return None
def get_readable_filesize(size):
"""get_readable_filesize(size) -> filesize -- return human readable
filesize from given size in bytes.
"""
if(size < 1024):
return str(size)+' bytes'
temp = size/1024.0
level = 1
while(temp >= 1024 and level< 3):
temp = temp/1024
level += 1
if(level == 1):
return str(round(temp,2))+' KB'
elif(level == 2):
return str(round(temp,2))+' MB'
else:
return str(round(temp,2))+' GB' |
prthkms/alex | alex/support.py | get_path | python | def get_path(query):
match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
if(os.path.isfile(match.group()) or os.path.isdir(match.group())):
return match.group()
else:
return None | get_path(query) -> pathname -- return the path found in a
given, found by matching a regular expression. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/support.py#L53-L61 | null | import handler
import web
import re
import os
def assign_handler(query, category):
"""assign_handler(query, category) -- assign the user's query to a
particular category, and call the appropriate handler.
"""
if(category == 'count lines'):
handler.lines(query)
elif(category == 'count words'):
handler.words(query)
elif(category == 'weather'):
web.weather(query)
elif(category == 'no match'):
web.generic(query)
elif(category == 'file info'):
handler.file_info(query)
elif(category == 'executable'):
handler.make_executable(query)
elif(category == 'search'):
handler.search(query)
elif(category == 'path'):
handler.add_to_path(query)
elif(category == 'uname'):
handler.system_info(query)
else:
print 'I\'m not able to understand your query'
def get_file_name(query):
"""get_file_name(query) -> filename -- return the filename found in a
given, found by matching a regular expression.
"""
match = re.search(r'\S*\.[\d\w]{1,4}', query)
if(match):
filename = match.group()
return filename
else:
start = match.start()
end = match.end()
spaces = re.finditer(r' ', query)
space_index = []
for space in spaces:
space_index.append(space.start())
space_index.pop()
for i in space_index:
filename = query[i+1:end]
if(os.path.isfile(filename)):
return filename
return None
def get_readable_filesize(size):
"""get_readable_filesize(size) -> filesize -- return human readable
filesize from given size in bytes.
"""
if(size < 1024):
return str(size)+' bytes'
temp = size/1024.0
level = 1
while(temp >= 1024 and level< 3):
temp = temp/1024
level += 1
if(level == 1):
return str(round(temp,2))+' KB'
elif(level == 2):
return str(round(temp,2))+' MB'
else:
return str(round(temp,2))+' GB' |
prthkms/alex | alex/support.py | get_readable_filesize | python | def get_readable_filesize(size):
if(size < 1024):
return str(size)+' bytes'
temp = size/1024.0
level = 1
while(temp >= 1024 and level< 3):
temp = temp/1024
level += 1
if(level == 1):
return str(round(temp,2))+' KB'
elif(level == 2):
return str(round(temp,2))+' MB'
else:
return str(round(temp,2))+' GB' | get_readable_filesize(size) -> filesize -- return human readable
filesize from given size in bytes. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/support.py#L63-L79 | null | import handler
import web
import re
import os
def assign_handler(query, category):
"""assign_handler(query, category) -- assign the user's query to a
particular category, and call the appropriate handler.
"""
if(category == 'count lines'):
handler.lines(query)
elif(category == 'count words'):
handler.words(query)
elif(category == 'weather'):
web.weather(query)
elif(category == 'no match'):
web.generic(query)
elif(category == 'file info'):
handler.file_info(query)
elif(category == 'executable'):
handler.make_executable(query)
elif(category == 'search'):
handler.search(query)
elif(category == 'path'):
handler.add_to_path(query)
elif(category == 'uname'):
handler.system_info(query)
else:
print 'I\'m not able to understand your query'
def get_file_name(query):
"""get_file_name(query) -> filename -- return the filename found in a
given, found by matching a regular expression.
"""
match = re.search(r'\S*\.[\d\w]{1,4}', query)
if(match):
filename = match.group()
return filename
else:
start = match.start()
end = match.end()
spaces = re.finditer(r' ', query)
space_index = []
for space in spaces:
space_index.append(space.start())
space_index.pop()
for i in space_index:
filename = query[i+1:end]
if(os.path.isfile(filename)):
return filename
return None
def get_path(query):
"""get_path(query) -> pathname -- return the path found in a
given, found by matching a regular expression.
"""
match = re.search(r'/(.*/)+(\S*(\.[\d\w]{1,4})?)', query)
if(os.path.isfile(match.group()) or os.path.isdir(match.group())):
return match.group()
else:
return None
|
mickybart/python-atlasbroker | atlasbroker/service.py | AtlasBroker.provision | python | def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id) | Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L74-L93 | null | class AtlasBroker(ServiceBroker):
"""Atlas Broker
Implement a service broker by overriding methods of Service
Constructor
Args:
config (config): Configuration of the broker
"""
def __init__(self, config):
# Create the AtlasBrokerBackend
self._backend = AtlasBrokerBackend(config)
self._config = config
def catalog(self):
return Service(
id=self._config.broker["id"],
name=self._config.broker["name"],
description=self._config.broker["description"],
bindable=self._config.broker["bindable"],
plans=self._config.broker["plans"],
tags=self._config.broker["tags"],
requires=self._config.broker["requires"],
metadata=self._config.broker["metadata"],
dashboard_client=self._config.broker["dashboard_client"],
plan_updateable=self._config.broker["plan_updateable"],
)
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id)
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
"""Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding)
def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec:
"""Update
Not implemented. Not used by Kubernetes Service Catalog.
Raises:
NotImplementedError
"""
raise NotImplementedError()
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters)
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
"""Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance)
def last_operation(self, instance_id: str, operation_data: str) -> LastOperation:
"""Last Operation
Not implemented. We are not using asynchronous operation on Atlas Broker.
Raises:
NotImplementedError
"""
raise NotImplementedError()
|
mickybart/python-atlasbroker | atlasbroker/service.py | AtlasBroker.unbind | python | def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding) | Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L95-L114 | null | class AtlasBroker(ServiceBroker):
"""Atlas Broker
Implement a service broker by overriding methods of Service
Constructor
Args:
config (config): Configuration of the broker
"""
def __init__(self, config):
# Create the AtlasBrokerBackend
self._backend = AtlasBrokerBackend(config)
self._config = config
def catalog(self):
return Service(
id=self._config.broker["id"],
name=self._config.broker["name"],
description=self._config.broker["description"],
bindable=self._config.broker["bindable"],
plans=self._config.broker["plans"],
tags=self._config.broker["tags"],
requires=self._config.broker["requires"],
metadata=self._config.broker["metadata"],
dashboard_client=self._config.broker["dashboard_client"],
plan_updateable=self._config.broker["plan_updateable"],
)
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id)
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
"""Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding)
def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec:
"""Update
Not implemented. Not used by Kubernetes Service Catalog.
Raises:
NotImplementedError
"""
raise NotImplementedError()
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters)
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
"""Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance)
def last_operation(self, instance_id: str, operation_data: str) -> LastOperation:
"""Last Operation
Not implemented. We are not using asynchronous operation on Atlas Broker.
Raises:
NotImplementedError
"""
raise NotImplementedError()
|
mickybart/python-atlasbroker | atlasbroker/service.py | AtlasBroker.bind | python | def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters) | Binding the instance
see openbrokerapi documentation | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L126-L139 | null | class AtlasBroker(ServiceBroker):
"""Atlas Broker
Implement a service broker by overriding methods of Service
Constructor
Args:
config (config): Configuration of the broker
"""
def __init__(self, config):
# Create the AtlasBrokerBackend
self._backend = AtlasBrokerBackend(config)
self._config = config
def catalog(self):
return Service(
id=self._config.broker["id"],
name=self._config.broker["name"],
description=self._config.broker["description"],
bindable=self._config.broker["bindable"],
plans=self._config.broker["plans"],
tags=self._config.broker["tags"],
requires=self._config.broker["requires"],
metadata=self._config.broker["metadata"],
dashboard_client=self._config.broker["dashboard_client"],
plan_updateable=self._config.broker["plan_updateable"],
)
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id)
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
"""Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding)
def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec:
"""Update
Not implemented. Not used by Kubernetes Service Catalog.
Raises:
NotImplementedError
"""
raise NotImplementedError()
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters)
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
"""Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance)
def last_operation(self, instance_id: str, operation_data: str) -> LastOperation:
"""Last Operation
Not implemented. We are not using asynchronous operation on Atlas Broker.
Raises:
NotImplementedError
"""
raise NotImplementedError()
|
mickybart/python-atlasbroker | atlasbroker/service.py | AtlasBroker.deprovision | python | def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance) | Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L141-L156 | null | class AtlasBroker(ServiceBroker):
"""Atlas Broker
Implement a service broker by overriding methods of Service
Constructor
Args:
config (config): Configuration of the broker
"""
def __init__(self, config):
# Create the AtlasBrokerBackend
self._backend = AtlasBrokerBackend(config)
self._config = config
def catalog(self):
return Service(
id=self._config.broker["id"],
name=self._config.broker["name"],
description=self._config.broker["description"],
bindable=self._config.broker["bindable"],
plans=self._config.broker["plans"],
tags=self._config.broker["tags"],
requires=self._config.broker["requires"],
metadata=self._config.broker["metadata"],
dashboard_client=self._config.broker["dashboard_client"],
plan_updateable=self._config.broker["plan_updateable"],
)
def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec:
"""Provision the new instance
see openbrokerapi documentation
Returns:
ProvisionedServiceSpec
"""
if service_details.plan_id == self._backend.config.UUID_PLANS_EXISTING_CLUSTER:
# Provision the instance on an Existing Atlas Cluster
# Find or create the instance
instance = self._backend.find(instance_id)
# Create the instance if needed
return self._backend.create(instance, service_details.parameters, existing=True)
# Plan not supported
raise ErrPlanUnsupported(service_details.plan_id)
def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails):
"""Unbinding the instance
see openbrokerapi documentation
Raises:
ErrBindingDoesNotExist: Binding does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find the binding
binding = self._backend.find(binding_id, instance)
if not binding.isProvisioned():
# The binding does not exist
raise ErrBindingDoesNotExist()
# Delete the binding
self._backend.unbind(binding)
def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec:
"""Update
Not implemented. Not used by Kubernetes Service Catalog.
Raises:
NotImplementedError
"""
raise NotImplementedError()
def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters)
def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec:
"""Deprovision an instance
see openbrokerapi documentation
Raises:
ErrInstanceDoesNotExist: Instance does not exist.
"""
# Find the instance
instance = self._backend.find(instance_id)
if not instance.isProvisioned():
# the instance does not exist
raise ErrInstanceDoesNotExist()
return self._backend.delete(instance)
def last_operation(self, instance_id: str, operation_data: str) -> LastOperation:
"""Last Operation
Not implemented. We are not using asynchronous operation on Atlas Broker.
Raises:
NotImplementedError
"""
raise NotImplementedError()
|
mickybart/python-atlasbroker | atlasbroker/config.py | Config.generate_binding_credentials | python | def generate_binding_credentials(self, binding):
uri = self.clusters.get(binding.instance.get_cluster(), None)
if not uri:
raise ErrClusterConfig(binding.instance.get_cluster())
# partial credentials
creds = {"username" : self.generate_binding_username(binding),
"password" : pwgen(32, symbols=False),
"database" : binding.instance.get_dbname()}
# uri
uri = uri % (
creds["username"],
creds["password"],
creds["database"])
creds["uri"] = uri
# return creds
return creds | Generate binding credentials
This function will permit to define the configuration to
connect to the instance.
Those credentials will be stored on a secret and exposed to a Pod.
We should at least returns the 'username' and 'password'.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
dict: All credentials and secrets.
Raises:
ErrClusterConfig: Connection string to the cluster is not available. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/config.py#L145-L182 | [
"def generate_binding_username(self, binding):\n \"\"\"Generate binding username\n\n We don't need anything static here. The UUID is a good way to create a username.\n\n IMPORTANT: Multiple calls of this function with the same binding should return the same username.\n\n Args:\n binding (AtlasServiceBinding.Binding): A binding\n\n Returns:\n str: The username to the database\n \"\"\"\n return binding.binding_id\n"
] | class Config:
"""Configuration for AtlasBroker and sub-modules
This class can be overridden and so adapted by every company to
set different policy about naming convention, password generation etc.
You should check those main functions used by the broker:
generate_instance_dbname
generate_binding_credentials
generate_binding_username
generate_binding_permissions
Constructor
Args:
atlas_credentials (dict): Atlas credentials eg: {"username" : "", "password": "", "group": ""}
mongo_credentials (dict): Mongo credentials eg: {"uri": "", "db": "", "timeoutms": 5000, "collection": ""}
Keyword Arguments:
clusters (list): List of cluster with uri associated. If not provided, it will be populate from Atlas.
"""
# Common keys used by the broker
# (parameters on the k8s instance yaml definition)
PARAMETER_DATABASE="database"
PARAMETER_CLUSTER="cluster"
# UUID
UUID_SERVICES_CLUSTER = "2a04f349-4aab-4fcb-af6d-8e1749a77c13"
UUID_PLANS_EXISTING_CLUSTER = "8db474d1-3cc0-4f4d-b864-24e3bd49b874"
def __init__(self, atlas_credentials, mongo_credentials, clusters=None):
self.atlas = atlas_credentials
self.mongo = mongo_credentials
# Broker Service configuration
self.broker = {
"id" : self.UUID_SERVICES_CLUSTER,
"name" : "atlas-mongodb-cluster",
"description" : "Atlas/MongoDB for applications",
"bindable" : True,
"plans" : [
ServicePlan(id=self.UUID_PLANS_EXISTING_CLUSTER,
name="atlas-mongodb-existing-cluster",
description="Atlas/MongoDB: Configure an existing cluster",
metadata=None,
free=False,
bindable=True),
],
"tags" : ['atlas', 'mongodb'],
"requires" : None,
"metadata" : ServiceMetadata(
displayName='Atlas - MongoDB Cloud Provider',
imageUrl=None,
longDescription=None,
providerDisplayName=None,
documentationUrl=None,
supportUrl=None,
),
"dashboard_client" : None,
"plan_updateable" : False,
}
# Clusters configuration
if clusters:
self.clusters = clusters
else:
# load from Atlas
atlas = Atlas(self.atlas["user"],
self.atlas["password"],
self.atlas["group"])
self.clusters = {}
for cluster in atlas.Clusters.get_all_clusters(iterable=True):
uri = cluster["mongoURIWithOptions"].replace('mongodb://', 'mongodb://%s:%s@').replace('/?','/%s?')
self.clusters[cluster["name"]] = uri
def load_json(json_file):
"""Load JSON file
Args:
json_file (str): filename of a json file
Returns:
dict: content of the file
"""
try:
with open(json_file) as f:
return json.load(f)
except FileNotFoundError:
return None
def generate_instance_dbname(self, instance):
"""Generate a Database name
This function permit to define the database name for this instance.
IMPORTANT: Multiple calls of this function with the same instance should return the same database name.
The UUID is a good way to set it but if you need to share a database across multiple namespaces,
you need to return a static name independent of the UUID.
It is not possible in the current broker api to bind to an instance from another namespace. So each namespace need
its own instance object despite that we want to share a database.
Atlas Broker is able to manage multiple instance UUIDs set to a unique database with a static name.
You have 2 way to do it:
- You can create each instance with the same parameters and to generate a static name based on those parameters only.
- You can set a static name directly on instance parameters with the key value of Config.PARAMETER_DATABASE. If this key exists, this function will never be called.
Args:
instance (AtlasServiceInstance.Instance): An instance
Returns:
str: The database name
"""
return 'instance-' + instance.instance_id
def generate_binding_credentials(self, binding):
"""Generate binding credentials
This function will permit to define the configuration to
connect to the instance.
Those credentials will be stored on a secret and exposed to a Pod.
We should at least returns the 'username' and 'password'.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
dict: All credentials and secrets.
Raises:
ErrClusterConfig: Connection string to the cluster is not available.
"""
uri = self.clusters.get(binding.instance.get_cluster(), None)
if not uri:
raise ErrClusterConfig(binding.instance.get_cluster())
# partial credentials
creds = {"username" : self.generate_binding_username(binding),
"password" : pwgen(32, symbols=False),
"database" : binding.instance.get_dbname()}
# uri
uri = uri % (
creds["username"],
creds["password"],
creds["database"])
creds["uri"] = uri
# return creds
return creds
def isGenerateBindingCredentialsPredictible(self):
"""Is generate_binding_credentials predictible ?
Permit to know if generate_binding_credentials call will generate same credentials
for every calls with the same binding parameter.
During the binding, the first bind will send a 201 Created response with credentials in the payload.
All other calls to bind with same parameters should return a 200 OK with credentials payload.
If a call to bind with different parameters is done, a 409 is returned without credentials payload.
However, some brokers do not respect 201/200/409 and some broker like UPS one will just send 200 for everything.
To better handle and/or workaround specs, we need to know if generate_binding_credentials
for an identical binding will return the same credentials.
That will permit the broker to decide if it can return credentials with 200 when it firstly created them with a 201
or to workaround the answer to avoid the service catalog to inject inaccurate credentials.
In the best world, it should be good to be able to generate "static" credentials and set the return to True on this function.
"""
return False
def generate_binding_username(self, binding):
"""Generate binding username
We don't need anything static here. The UUID is a good way to create a username.
IMPORTANT: Multiple calls of this function with the same binding should return the same username.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
str: The username to the database
"""
return binding.binding_id
def generate_binding_permissions(self, binding, permissions):
"""Generate Users permissions on the database
Defining roles to the database for the users.
We can pass extra information into parameters of the binding if needed (see binding.parameters).
Args:
binding (AtlasServiceBinding.Binding): A binding
permissions (atlasapi.specs.DatabaseUsersPermissionsSpecs): Permissions for Atlas
Returns:
atlasapi.specs.DatabaseUsersPermissionsSpecs: Permissions for the new user
"""
permissions.add_roles(binding.instance.get_dbname(),
[RoleSpecs.dbAdmin,
RoleSpecs.readWrite])
return permissions
|
mickybart/python-atlasbroker | atlasbroker/config.py | Config.generate_binding_permissions | python | def generate_binding_permissions(self, binding, permissions):
permissions.add_roles(binding.instance.get_dbname(),
[RoleSpecs.dbAdmin,
RoleSpecs.readWrite])
return permissions | Generate Users permissions on the database
Defining roles to the database for the users.
We can pass extra information into parameters of the binding if needed (see binding.parameters).
Args:
binding (AtlasServiceBinding.Binding): A binding
permissions (atlasapi.specs.DatabaseUsersPermissionsSpecs): Permissions for Atlas
Returns:
atlasapi.specs.DatabaseUsersPermissionsSpecs: Permissions for the new user | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/config.py#L220-L236 | null | class Config:
"""Configuration for AtlasBroker and sub-modules
This class can be overridden and so adapted by every company to
set different policy about naming convention, password generation etc.
You should check those main functions used by the broker:
generate_instance_dbname
generate_binding_credentials
generate_binding_username
generate_binding_permissions
Constructor
Args:
atlas_credentials (dict): Atlas credentials eg: {"username" : "", "password": "", "group": ""}
mongo_credentials (dict): Mongo credentials eg: {"uri": "", "db": "", "timeoutms": 5000, "collection": ""}
Keyword Arguments:
clusters (list): List of cluster with uri associated. If not provided, it will be populate from Atlas.
"""
# Common keys used by the broker
# (parameters on the k8s instance yaml definition)
PARAMETER_DATABASE="database"
PARAMETER_CLUSTER="cluster"
# UUID
UUID_SERVICES_CLUSTER = "2a04f349-4aab-4fcb-af6d-8e1749a77c13"
UUID_PLANS_EXISTING_CLUSTER = "8db474d1-3cc0-4f4d-b864-24e3bd49b874"
def __init__(self, atlas_credentials, mongo_credentials, clusters=None):
self.atlas = atlas_credentials
self.mongo = mongo_credentials
# Broker Service configuration
self.broker = {
"id" : self.UUID_SERVICES_CLUSTER,
"name" : "atlas-mongodb-cluster",
"description" : "Atlas/MongoDB for applications",
"bindable" : True,
"plans" : [
ServicePlan(id=self.UUID_PLANS_EXISTING_CLUSTER,
name="atlas-mongodb-existing-cluster",
description="Atlas/MongoDB: Configure an existing cluster",
metadata=None,
free=False,
bindable=True),
],
"tags" : ['atlas', 'mongodb'],
"requires" : None,
"metadata" : ServiceMetadata(
displayName='Atlas - MongoDB Cloud Provider',
imageUrl=None,
longDescription=None,
providerDisplayName=None,
documentationUrl=None,
supportUrl=None,
),
"dashboard_client" : None,
"plan_updateable" : False,
}
# Clusters configuration
if clusters:
self.clusters = clusters
else:
# load from Atlas
atlas = Atlas(self.atlas["user"],
self.atlas["password"],
self.atlas["group"])
self.clusters = {}
for cluster in atlas.Clusters.get_all_clusters(iterable=True):
uri = cluster["mongoURIWithOptions"].replace('mongodb://', 'mongodb://%s:%s@').replace('/?','/%s?')
self.clusters[cluster["name"]] = uri
def load_json(json_file):
"""Load JSON file
Args:
json_file (str): filename of a json file
Returns:
dict: content of the file
"""
try:
with open(json_file) as f:
return json.load(f)
except FileNotFoundError:
return None
def generate_instance_dbname(self, instance):
"""Generate a Database name
This function permit to define the database name for this instance.
IMPORTANT: Multiple calls of this function with the same instance should return the same database name.
The UUID is a good way to set it but if you need to share a database across multiple namespaces,
you need to return a static name independent of the UUID.
It is not possible in the current broker api to bind to an instance from another namespace. So each namespace need
its own instance object despite that we want to share a database.
Atlas Broker is able to manage multiple instance UUIDs set to a unique database with a static name.
You have 2 way to do it:
- You can create each instance with the same parameters and to generate a static name based on those parameters only.
- You can set a static name directly on instance parameters with the key value of Config.PARAMETER_DATABASE. If this key exists, this function will never be called.
Args:
instance (AtlasServiceInstance.Instance): An instance
Returns:
str: The database name
"""
return 'instance-' + instance.instance_id
def generate_binding_credentials(self, binding):
"""Generate binding credentials
This function will permit to define the configuration to
connect to the instance.
Those credentials will be stored on a secret and exposed to a Pod.
We should at least returns the 'username' and 'password'.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
dict: All credentials and secrets.
Raises:
ErrClusterConfig: Connection string to the cluster is not available.
"""
uri = self.clusters.get(binding.instance.get_cluster(), None)
if not uri:
raise ErrClusterConfig(binding.instance.get_cluster())
# partial credentials
creds = {"username" : self.generate_binding_username(binding),
"password" : pwgen(32, symbols=False),
"database" : binding.instance.get_dbname()}
# uri
uri = uri % (
creds["username"],
creds["password"],
creds["database"])
creds["uri"] = uri
# return creds
return creds
def isGenerateBindingCredentialsPredictible(self):
"""Is generate_binding_credentials predictible ?
Permit to know if generate_binding_credentials call will generate same credentials
for every calls with the same binding parameter.
During the binding, the first bind will send a 201 Created response with credentials in the payload.
All other calls to bind with same parameters should return a 200 OK with credentials payload.
If a call to bind with different parameters is done, a 409 is returned without credentials payload.
However, some brokers do not respect 201/200/409 and some broker like UPS one will just send 200 for everything.
To better handle and/or workaround specs, we need to know if generate_binding_credentials
for an identical binding will return the same credentials.
That will permit the broker to decide if it can return credentials with 200 when it firstly created them with a 201
or to workaround the answer to avoid the service catalog to inject inaccurate credentials.
In the best world, it should be good to be able to generate "static" credentials and set the return to True on this function.
"""
return False
def generate_binding_username(self, binding):
"""Generate binding username
We don't need anything static here. The UUID is a good way to create a username.
IMPORTANT: Multiple calls of this function with the same binding should return the same username.
Args:
binding (AtlasServiceBinding.Binding): A binding
Returns:
str: The username to the database
"""
return binding.binding_id
def generate_binding_permissions(self, binding, permissions):
"""Generate Users permissions on the database
Defining roles to the database for the users.
We can pass extra information into parameters of the binding if needed (see binding.parameters).
Args:
binding (AtlasServiceBinding.Binding): A binding
permissions (atlasapi.specs.DatabaseUsersPermissionsSpecs): Permissions for Atlas
Returns:
atlasapi.specs.DatabaseUsersPermissionsSpecs: Permissions for the new user
"""
permissions.add_roles(binding.instance.get_dbname(),
[RoleSpecs.dbAdmin,
RoleSpecs.readWrite])
return permissions
|
mickybart/python-atlasbroker | atlasbroker/apis/health.py | getApi | python | def getApi():
api = Blueprint('health', __name__, url_prefix='/')
@api.route('health', methods=['GET'])
def health():
'''Health check'''
return jsonify({ "status" : True})
return api | Get Api for /health
Returns:
Blueprint: section for health check | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/apis/health.py#L19-L32 | null | # Copyright (c) 2018 Yellow Pages Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Health Check"""
from flask import Blueprint, jsonify
def getApi():
    """Get Api for /health

    Returns:
        Blueprint: section for health check
    """
    api = Blueprint('health', __name__, url_prefix='/')

    @api.route('health', methods=['GET'])
    def health():
        '''Health check: liveness endpoint, always reports status True.'''
        return jsonify({"status": True})

    return api
|
mickybart/python-atlasbroker | atlasbroker/servicebinding.py | AtlasServiceBinding.find | python | def find(self, binding_id, instance):
binding = AtlasServiceBinding.Binding(binding_id, instance)
self.backend.storage.populate(binding)
return binding | find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/servicebinding.py#L36-L50 | null | class AtlasServiceBinding():
"""Service Catalog : Atlas Service Binding
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, binding_id, instance):
"""find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding
"""
binding = AtlasServiceBinding.Binding(binding_id, instance)
self.backend.storage.populate(binding)
return binding
def bind(self, binding, parameters):
""" Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters
"""
if not binding.isProvisioned():
# Update binding parameters
binding.parameters = parameters
# Credentials
creds = self.backend.config.generate_binding_credentials(binding)
# Binding
p = self.backend.config.generate_binding_permissions(
binding,
DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
)
try:
self.backend.atlas.DatabaseUsers.create_a_database_user(p)
except ErrAtlasConflict:
# The user already exists. This is not an issue because this is possible that we
# created it in a previous call that failed later on the broker.
pass
self.backend.storage.store(binding)
# Bind done
return Binding(BindState.SUCCESSFUL_BOUND,
credentials = creds)
elif binding.parameters == parameters:
if self.backend.config.isGenerateBindingCredentialsPredictible():
# Identical and credentials generation is predictible so we can return credentials again.
creds = self.backend.config.generate_binding_credentials(binding)
return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
credentials = creds)
# Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid
# wrong data injection. In this case we trigger a conflicting parameters for the existing binding depsite
# this is not the case.
raise ErrBindingAlreadyExists()
else:
# Different parameters ...
raise ErrBindingAlreadyExists()
def unbind(self, binding):
""" Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
"""
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
# The user does not exist. This is not an issue because this is possible that we
# removed it in a previous call that failed later on the broker.
# This cover a manually deleted user case too.
pass
self.backend.storage.remove(binding)
class Binding:
"""Binding
Constructor
Args:
binding_id (str): UUID of the binding
instance (AtlasServiceInstance.Instance): An instance
"""
def __init__(self, binding_id, instance):
self.binding_id = binding_id
self.instance = instance
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceBinding.Binding and self.binding_id == other.binding_id and self.instance == other.instance
|
mickybart/python-atlasbroker | atlasbroker/servicebinding.py | AtlasServiceBinding.bind | python | def bind(self, binding, parameters):
if not binding.isProvisioned():
# Update binding parameters
binding.parameters = parameters
# Credentials
creds = self.backend.config.generate_binding_credentials(binding)
# Binding
p = self.backend.config.generate_binding_permissions(
binding,
DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
)
try:
self.backend.atlas.DatabaseUsers.create_a_database_user(p)
except ErrAtlasConflict:
# The user already exists. This is not an issue because this is possible that we
# created it in a previous call that failed later on the broker.
pass
self.backend.storage.store(binding)
# Bind done
return Binding(BindState.SUCCESSFUL_BOUND,
credentials = creds)
elif binding.parameters == parameters:
if self.backend.config.isGenerateBindingCredentialsPredictible():
# Identical and credentials generation is predictible so we can return credentials again.
creds = self.backend.config.generate_binding_credentials(binding)
return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
credentials = creds)
# Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid
# wrong data injection. In this case we trigger a conflicting parameters for the existing binding depsite
# this is not the case.
raise ErrBindingAlreadyExists()
else:
# Different parameters ...
raise ErrBindingAlreadyExists() | Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/servicebinding.py#L52-L107 | null | class AtlasServiceBinding():
"""Service Catalog : Atlas Service Binding
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, binding_id, instance):
"""find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding
"""
binding = AtlasServiceBinding.Binding(binding_id, instance)
self.backend.storage.populate(binding)
return binding
def bind(self, binding, parameters):
""" Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters
"""
if not binding.isProvisioned():
# Update binding parameters
binding.parameters = parameters
# Credentials
creds = self.backend.config.generate_binding_credentials(binding)
# Binding
p = self.backend.config.generate_binding_permissions(
binding,
DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
)
try:
self.backend.atlas.DatabaseUsers.create_a_database_user(p)
except ErrAtlasConflict:
# The user already exists. This is not an issue because this is possible that we
# created it in a previous call that failed later on the broker.
pass
self.backend.storage.store(binding)
# Bind done
return Binding(BindState.SUCCESSFUL_BOUND,
credentials = creds)
elif binding.parameters == parameters:
if self.backend.config.isGenerateBindingCredentialsPredictible():
# Identical and credentials generation is predictible so we can return credentials again.
creds = self.backend.config.generate_binding_credentials(binding)
return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
credentials = creds)
# Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid
# wrong data injection. In this case we trigger a conflicting parameters for the existing binding depsite
# this is not the case.
raise ErrBindingAlreadyExists()
else:
# Different parameters ...
raise ErrBindingAlreadyExists()
def unbind(self, binding):
""" Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
"""
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
# The user does not exist. This is not an issue because this is possible that we
# removed it in a previous call that failed later on the broker.
# This cover a manually deleted user case too.
pass
self.backend.storage.remove(binding)
class Binding:
"""Binding
Constructor
Args:
binding_id (str): UUID of the binding
instance (AtlasServiceInstance.Instance): An instance
"""
def __init__(self, binding_id, instance):
self.binding_id = binding_id
self.instance = instance
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceBinding.Binding and self.binding_id == other.binding_id and self.instance == other.instance
|
mickybart/python-atlasbroker | atlasbroker/servicebinding.py | AtlasServiceBinding.unbind | python | def unbind(self, binding):
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
# The user does not exist. This is not an issue because this is possible that we
# removed it in a previous call that failed later on the broker.
# This cover a manually deleted user case too.
pass
self.backend.storage.remove(binding) | Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/servicebinding.py#L109-L126 | null | class AtlasServiceBinding():
"""Service Catalog : Atlas Service Binding
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, binding_id, instance):
"""find an instance
Create a new instance and populate it with data stored if it exists.
Args:
binding_id (string): UUID of the binding
instance (AtlasServiceInstance.Instance): instance
Returns:
AtlasServiceBinding: A binding
"""
binding = AtlasServiceBinding.Binding(binding_id, instance)
self.backend.storage.populate(binding)
return binding
def bind(self, binding, parameters):
""" Create the binding
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
Raises:
ErrBindingAlreadyExists: If binding exists but with different parameters
"""
if not binding.isProvisioned():
# Update binding parameters
binding.parameters = parameters
# Credentials
creds = self.backend.config.generate_binding_credentials(binding)
# Binding
p = self.backend.config.generate_binding_permissions(
binding,
DatabaseUsersPermissionsSpecs(creds["username"],creds["password"])
)
try:
self.backend.atlas.DatabaseUsers.create_a_database_user(p)
except ErrAtlasConflict:
# The user already exists. This is not an issue because this is possible that we
# created it in a previous call that failed later on the broker.
pass
self.backend.storage.store(binding)
# Bind done
return Binding(BindState.SUCCESSFUL_BOUND,
credentials = creds)
elif binding.parameters == parameters:
if self.backend.config.isGenerateBindingCredentialsPredictible():
# Identical and credentials generation is predictible so we can return credentials again.
creds = self.backend.config.generate_binding_credentials(binding)
return Binding(BindState.IDENTICAL_ALREADY_EXISTS,
credentials = creds)
# Identical but credentials generation is NOT predictible. So we are breaking the spec to avoid
# wrong data injection. In this case we trigger a conflicting parameters for the existing binding depsite
# this is not the case.
raise ErrBindingAlreadyExists()
else:
# Different parameters ...
raise ErrBindingAlreadyExists()
def unbind(self, binding):
""" Unbind the instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
"""
username = self.backend.config.generate_binding_username(binding)
try:
self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
except ErrAtlasNotFound:
# The user does not exist. This is not an issue because this is possible that we
# removed it in a previous call that failed later on the broker.
# This cover a manually deleted user case too.
pass
self.backend.storage.remove(binding)
class Binding:
"""Binding
Constructor
Args:
binding_id (str): UUID of the binding
instance (AtlasServiceInstance.Instance): An instance
"""
def __init__(self, binding_id, instance):
self.binding_id = binding_id
self.instance = instance
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceBinding.Binding and self.binding_id == other.binding_id and self.instance == other.instance
|
mickybart/python-atlasbroker | atlasbroker/backend.py | AtlasBrokerBackend.find | python | def find(self, _id, instance = None):
if instance is None:
# We are looking for an instance
return self.service_instance.find(_id)
else:
# We are looking for a binding
return self.service_binding.find(_id, instance) | Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/backend.py#L50-L68 | null | class AtlasBrokerBackend:
"""Backend for the Atlas Broker
Expose all services to serve Broker requests
Constructor
Args:
config (Config): Configuration of the Atlas Broker
"""
def __init__(self, config):
self.config = config
self.storage = AtlasBrokerStorage(self.config.mongo["uri"],
self.config.mongo["timeoutms"],
self.config.mongo["db"],
self.config.mongo["collection"])
self.atlas = Atlas(self.config.atlas["user"],
self.config.atlas["password"],
self.config.atlas["group"])
self.service_instance = AtlasServiceInstance(self)
self.service_binding = AtlasServiceBinding(self)
def find(self, _id, instance = None):
""" Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
"""
if instance is None:
# We are looking for an instance
return self.service_instance.find(_id)
else:
# We are looking for a binding
return self.service_binding.find(_id, instance)
def create(self, instance, parameters, existing=True):
"""Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status
"""
return self.service_instance.create(instance, parameters, existing)
def delete(self, instance):
"""Delete an instance
Args:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
DeprovisionServiceSpec: Status
"""
return self.service_instance.delete(instance)
def bind(self, binding, parameters):
"""Binding to an instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
"""
return self.service_binding.bind(binding, parameters)
def unbind(self, binding):
"""Unbinding an instance
Args:
binding (AtlasServiceBinding.Binding): Existing binding
"""
self.service_binding.unbind(binding)
|
mickybart/python-atlasbroker | atlasbroker/backend.py | AtlasBrokerBackend.create | python | def create(self, instance, parameters, existing=True):
return self.service_instance.create(instance, parameters, existing) | Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/backend.py#L70-L83 | null | class AtlasBrokerBackend:
"""Backend for the Atlas Broker
Expose all services to serve Broker requests
Constructor
Args:
config (Config): Configuration of the Atlas Broker
"""
def __init__(self, config):
self.config = config
self.storage = AtlasBrokerStorage(self.config.mongo["uri"],
self.config.mongo["timeoutms"],
self.config.mongo["db"],
self.config.mongo["collection"])
self.atlas = Atlas(self.config.atlas["user"],
self.config.atlas["password"],
self.config.atlas["group"])
self.service_instance = AtlasServiceInstance(self)
self.service_binding = AtlasServiceBinding(self)
def find(self, _id, instance = None):
""" Find
Args:
_id (str): instance id or binding Id
Keyword Arguments:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
AtlasServiceInstance.Instance or AtlasServiceBinding.Binding: An instance or binding.
"""
if instance is None:
# We are looking for an instance
return self.service_instance.find(_id)
else:
# We are looking for a binding
return self.service_binding.find(_id, instance)
def create(self, instance, parameters, existing=True):
"""Create an instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
Keyword Arguments:
existing (bool): True (use an existing cluster), False (create a new cluster)
Returns:
ProvisionedServiceSpec: Status
"""
return self.service_instance.create(instance, parameters, existing)
def delete(self, instance):
"""Delete an instance
Args:
instance (AtlasServiceInstance.Instance): Existing instance
Returns:
DeprovisionServiceSpec: Status
"""
return self.service_instance.delete(instance)
def bind(self, binding, parameters):
"""Binding to an instance
Args:
binding (AtlasServiceBinding.Binding): Existing or New binding
parameters (dict): Parameters for the binding
Returns:
Binding: Status
"""
return self.service_binding.bind(binding, parameters)
def unbind(self, binding):
"""Unbinding an instance
Args:
binding (AtlasServiceBinding.Binding): Existing binding
"""
self.service_binding.unbind(binding)
|
mickybart/python-atlasbroker | atlasbroker/serviceinstance.py | AtlasServiceInstance.find | python | def find(self, instance_id):
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance | find an instance
Create a new instance and populate it with data stored if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/serviceinstance.py#L35-L48 | null | class AtlasServiceInstance():
"""Service Catalog : Atlas Service Instance
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, instance_id):
""" find an instance
Create a new instance and populate it with data stored if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance
"""
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance
def create(self, instance, parameters, existing):
""" Create the instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
existing (bool): Create an instance on an existing Atlas cluster
Returns:
ProvisionedServiceSpec: Status
Raises:
ErrInstanceAlreadyExists: If instance exists but with different parameters
ErrClusterNotFound: Cluster does not exist
"""
if not instance.isProvisioned():
# Set parameters
instance.parameters = parameters
# Existing cluster
if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
# We need to use an existing cluster that is not available !
raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER])
elif not existing:
# We need to create a new cluster
# We should not reach this code because the AtlasBroker.provision should
# raise an ErrPlanUnsupported before.
raise NotImplementedError()
result = self.backend.storage.store(instance)
# Provision done
return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
"",
str(result))
elif instance.parameters == parameters:
# Identical so nothing to do
return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
"",
"duplicate")
else:
# Different parameters ...
raise ErrInstanceAlreadyExists()
def delete(self, instance):
"""Delete the instance
Args:
instance (AtlasServiceInstance.Instance): an existing instance
Returns:
DeprovisionServiceSpec: Status
"""
#TODO: Really drop the database based on a policy set in `instance.parameters`.
#
# We need :
# - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain)
# - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database)
# - credential on the Atlas cluster `instance.get_cluster()` to drop the database
#
self.backend.storage.remove(instance)
return DeprovisionServiceSpec(False, "done")
class Instance:
"""Instance
Constructor
Args:
instance_id (str): UUID of the instance
backend (AtlasBrokerBackend): Atlas Broker Backend
Keyword Arguments:
parameters (dict): Parameters for the instance
"""
def __init__(self, instance_id, backend, parameters=None):
self.instance_id = instance_id
self.backend = backend
self.parameters = parameters
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceInstance.Instance and self.instance_id == other.instance_id and self.parameters == other.parameters
def get_dbname(self):
"""Get the database name
Returns:
str: The database name
"""
static_name = self.parameters.get(self.backend.config.PARAMETER_DATABASE, None)
if static_name:
return static_name
return self.backend.config.generate_instance_dbname(self)
def get_cluster(self):
"""Get the Atlas cluster
Returns:
str: The Atlas cluster name
"""
return self.parameters[self.backend.config.PARAMETER_CLUSTER]
|
mickybart/python-atlasbroker | atlasbroker/serviceinstance.py | AtlasServiceInstance.create | python | def create(self, instance, parameters, existing):
if not instance.isProvisioned():
# Set parameters
instance.parameters = parameters
# Existing cluster
if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
# We need to use an existing cluster that is not available !
raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER])
elif not existing:
# We need to create a new cluster
# We should not reach this code because the AtlasBroker.provision should
# raise an ErrPlanUnsupported before.
raise NotImplementedError()
result = self.backend.storage.store(instance)
# Provision done
return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
"",
str(result))
elif instance.parameters == parameters:
# Identical so nothing to do
return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
"",
"duplicate")
else:
# Different parameters ...
raise ErrInstanceAlreadyExists() | Create the instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
existing (bool): Create an instance on an existing Atlas cluster
Returns:
ProvisionedServiceSpec: Status
Raises:
ErrInstanceAlreadyExists: If instance exists but with different parameters
ErrClusterNotFound: Cluster does not exist | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/serviceinstance.py#L50-L95 | null | class AtlasServiceInstance():
"""Service Catalog : Atlas Service Instance
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, instance_id):
""" find an instance
Create a new instance and populate it with data stored if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance
"""
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance
def create(self, instance, parameters, existing):
""" Create the instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
existing (bool): Create an instance on an existing Atlas cluster
Returns:
ProvisionedServiceSpec: Status
Raises:
ErrInstanceAlreadyExists: If instance exists but with different parameters
ErrClusterNotFound: Cluster does not exist
"""
if not instance.isProvisioned():
# Set parameters
instance.parameters = parameters
# Existing cluster
if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
# We need to use an existing cluster that is not available !
raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER])
elif not existing:
# We need to create a new cluster
# We should not reach this code because the AtlasBroker.provision should
# raise an ErrPlanUnsupported before.
raise NotImplementedError()
result = self.backend.storage.store(instance)
# Provision done
return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
"",
str(result))
elif instance.parameters == parameters:
# Identical so nothing to do
return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
"",
"duplicate")
else:
# Different parameters ...
raise ErrInstanceAlreadyExists()
def delete(self, instance):
"""Delete the instance
Args:
instance (AtlasServiceInstance.Instance): an existing instance
Returns:
DeprovisionServiceSpec: Status
"""
#TODO: Really drop the database based on a policy set in `instance.parameters`.
#
# We need :
# - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain)
# - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database)
# - credential on the Atlas cluster `instance.get_cluster()` to drop the database
#
self.backend.storage.remove(instance)
return DeprovisionServiceSpec(False, "done")
class Instance:
"""Instance
Constructor
Args:
instance_id (str): UUID of the instance
backend (AtlasBrokerBackend): Atlas Broker Backend
Keyword Arguments:
parameters (dict): Parameters for the instance
"""
def __init__(self, instance_id, backend, parameters=None):
self.instance_id = instance_id
self.backend = backend
self.parameters = parameters
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceInstance.Instance and self.instance_id == other.instance_id and self.parameters == other.parameters
def get_dbname(self):
"""Get the database name
Returns:
str: The database name
"""
static_name = self.parameters.get(self.backend.config.PARAMETER_DATABASE, None)
if static_name:
return static_name
return self.backend.config.generate_instance_dbname(self)
def get_cluster(self):
"""Get the Atlas cluster
Returns:
str: The Atlas cluster name
"""
return self.parameters[self.backend.config.PARAMETER_CLUSTER]
|
mickybart/python-atlasbroker | atlasbroker/serviceinstance.py | AtlasServiceInstance.delete | python | def delete(self, instance):
#TODO: Really drop the database based on a policy set in `instance.parameters`.
#
# We need :
# - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain)
# - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database)
# - credential on the Atlas cluster `instance.get_cluster()` to drop the database
#
self.backend.storage.remove(instance)
return DeprovisionServiceSpec(False, "done") | Delete the instance
Args:
instance (AtlasServiceInstance.Instance): an existing instance
Returns:
DeprovisionServiceSpec: Status | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/serviceinstance.py#L97-L117 | null | class AtlasServiceInstance():
"""Service Catalog : Atlas Service Instance
Constructor
Args:
backend (AtlasBrokerBackend): Atlas Broker Backend
"""
def __init__(self, backend):
self.backend = backend
def find(self, instance_id):
""" find an instance
Create a new instance and populate it with data stored if it exists.
Args:
instance_id (str): UUID of the instance
Returns:
AtlasServiceInstance.Instance: An instance
"""
instance = AtlasServiceInstance.Instance(instance_id, self.backend)
self.backend.storage.populate(instance)
return instance
def create(self, instance, parameters, existing):
""" Create the instance
Args:
instance (AtlasServiceInstance.Instance): Existing or New instance
parameters (dict): Parameters for the instance
existing (bool): Create an instance on an existing Atlas cluster
Returns:
ProvisionedServiceSpec: Status
Raises:
ErrInstanceAlreadyExists: If instance exists but with different parameters
ErrClusterNotFound: Cluster does not exist
"""
if not instance.isProvisioned():
# Set parameters
instance.parameters = parameters
# Existing cluster
if existing and not self.backend.atlas.Clusters.is_existing_cluster(instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
# We need to use an existing cluster that is not available !
raise ErrClusterNotFound(instance.parameters[self.backend.config.PARAMETER_CLUSTER])
elif not existing:
# We need to create a new cluster
# We should not reach this code because the AtlasBroker.provision should
# raise an ErrPlanUnsupported before.
raise NotImplementedError()
result = self.backend.storage.store(instance)
# Provision done
return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
"",
str(result))
elif instance.parameters == parameters:
# Identical so nothing to do
return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
"",
"duplicate")
else:
# Different parameters ...
raise ErrInstanceAlreadyExists()
def delete(self, instance):
"""Delete the instance
Args:
instance (AtlasServiceInstance.Instance): an existing instance
Returns:
DeprovisionServiceSpec: Status
"""
#TODO: Really drop the database based on a policy set in `instance.parameters`.
#
# We need :
# - Set a policy in parameters of the instance (eg: policy-on-delete : retain|drop => default to retain)
# - to check that the database name `instance.get_dbname()` is not in use by another instance (shared database)
# - credential on the Atlas cluster `instance.get_cluster()` to drop the database
#
self.backend.storage.remove(instance)
return DeprovisionServiceSpec(False, "done")
class Instance:
"""Instance
Constructor
Args:
instance_id (str): UUID of the instance
backend (AtlasBrokerBackend): Atlas Broker Backend
Keyword Arguments:
parameters (dict): Parameters for the instance
"""
def __init__(self, instance_id, backend, parameters=None):
self.instance_id = instance_id
self.backend = backend
self.parameters = parameters
self.provisioned = True
def isProvisioned(self):
"""was it populated from the storage ?
Returns:
bool: True (populate from stored information), False (This is a new instance)
"""
return self.provisioned
def __eq__(self, other):
return type(other) is AtlasServiceInstance.Instance and self.instance_id == other.instance_id and self.parameters == other.parameters
def get_dbname(self):
"""Get the database name
Returns:
str: The database name
"""
static_name = self.parameters.get(self.backend.config.PARAMETER_DATABASE, None)
if static_name:
return static_name
return self.backend.config.generate_instance_dbname(self)
def get_cluster(self):
"""Get the Atlas cluster
Returns:
str: The Atlas cluster name
"""
return self.parameters[self.backend.config.PARAMETER_CLUSTER]
|
mickybart/python-atlasbroker | atlasbroker/storage.py | AtlasBrokerStorage.populate | python | def populate(self, obj):
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False | Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L73-L107 | null | class AtlasBrokerStorage:
""" Storage
Permit to store ServiceInstance and ServiceBinding into a MongoDB.
This is used for caching and to trace what is done by the broker.
This is internally used to don't create same instances/bindings and to return appropriate code like AlreadyExists
That reducing the number of call to Atlas APIs too.
Constructor
Args:
uri (str): MongoDB connection string
timeoutms (int): MongoDB requests timeout in ms
db (str): The DB name
collection (str): The collection name
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
"""
def __init__(self, uri, timeoutms, db, collection):
self.mongo_client = None
# Connect to Mongo
try:
print("connection to mongo...")
# Init Mongo and create DB and collections objects
self.mongo_client = pymongo.MongoClient(uri, timeoutms)
self.db = self.mongo_client[db]
self.broker = self.db.get_collection(collection)
if len(self.broker.index_information()) == 0:
# collection does not exist
# create it and create indexes
self.db.create_collection(collection)
self.broker.create_index( "instance_id" )
self.broker.create_index( "binding_id" )
print("mongo: connected")
except Exception as e:
print("mongo: " + str(e))
self.mongo_client = None
raise ErrStorageMongoConnection("Initialization")
def populate(self, obj):
""" Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False
def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore()
def remove(self, obj):
""" Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
"""
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj))
def remove_instance(self, instance):
""" Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance.
"""
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id)
def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id)
|
mickybart/python-atlasbroker | atlasbroker/storage.py | AtlasBrokerStorage.store | python | def store(self, obj):
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore() | Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L109-L145 | null | class AtlasBrokerStorage:
""" Storage
Permit to store ServiceInstance and ServiceBinding into a MongoDB.
This is used for caching and to trace what is done by the broker.
This is internally used to don't create same instances/bindings and to return appropriate code like AlreadyExists
That reducing the number of call to Atlas APIs too.
Constructor
Args:
uri (str): MongoDB connection string
timeoutms (int): MongoDB requests timeout in ms
db (str): The DB name
collection (str): The collection name
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
"""
def __init__(self, uri, timeoutms, db, collection):
self.mongo_client = None
# Connect to Mongo
try:
print("connection to mongo...")
# Init Mongo and create DB and collections objects
self.mongo_client = pymongo.MongoClient(uri, timeoutms)
self.db = self.mongo_client[db]
self.broker = self.db.get_collection(collection)
if len(self.broker.index_information()) == 0:
# collection does not exist
# create it and create indexes
self.db.create_collection(collection)
self.broker.create_index( "instance_id" )
self.broker.create_index( "binding_id" )
print("mongo: connected")
except Exception as e:
print("mongo: " + str(e))
self.mongo_client = None
raise ErrStorageMongoConnection("Initialization")
def populate(self, obj):
""" Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False
def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore()
def remove(self, obj):
""" Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
"""
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj))
def remove_instance(self, instance):
""" Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance.
"""
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id)
def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id)
|
mickybart/python-atlasbroker | atlasbroker/storage.py | AtlasBrokerStorage.remove | python | def remove(self, obj):
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj)) | Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L147-L163 | [
"def remove_instance(self, instance):\n \"\"\" Remove an instance\n\n Remove an object from the MongoDB storage for caching\n\n Args:\n instance (AtlasServiceInstance.Instance): instance\n\n Raises:\n ErrStorageMongoConnection: Error during MongoDB communication.\n ErrStorageRemoveInstance: Failed to remove the instance.\n \"\"\"\n\n # query\n query = { \"instance_id\" : instance.instance_id, \"binding_id\" : { \"$exists\" : False } }\n\n # delete the instance\n try:\n result = self.broker.delete_one(query)\n except:\n raise ErrStorageMongoConnection(\"Remove Instance\")\n\n # return the result\n if result is not None and result.deleted_count == 1:\n instance.provisioned = False\n else:\n raise ErrStorageRemoveInstance(instance.instance_id)\n",
"def remove_binding(self, binding):\n \"\"\" Remove a binding\n\n Remove an object from the MongoDB storage for caching\n\n Args:\n binding (AtlasServiceBinding.Binding): binding\n\n Raises:\n ErrStorageMongoConnection: Error during MongoDB communication.\n ErrStorageRemoveBinding: Failed to remove the binding\n \"\"\"\n\n # query\n query = { \"binding_id\" : binding.binding_id, \"instance_id\" : binding.instance.instance_id }\n\n # delete the binding\n try:\n result = self.broker.delete_one(query)\n except:\n raise ErrStorageMongoConnection(\"Remove Binding\")\n\n # return the result\n if result is not None and result.deleted_count == 1:\n binding.provisioned = False\n else:\n raise ErrStorageRemoveBinding(binding.binding_id)\n"
] | class AtlasBrokerStorage:
""" Storage
Permit to store ServiceInstance and ServiceBinding into a MongoDB.
This is used for caching and to trace what is done by the broker.
This is internally used to don't create same instances/bindings and to return appropriate code like AlreadyExists
That reducing the number of call to Atlas APIs too.
Constructor
Args:
uri (str): MongoDB connection string
timeoutms (int): MongoDB requests timeout in ms
db (str): The DB name
collection (str): The collection name
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
"""
def __init__(self, uri, timeoutms, db, collection):
self.mongo_client = None
# Connect to Mongo
try:
print("connection to mongo...")
# Init Mongo and create DB and collections objects
self.mongo_client = pymongo.MongoClient(uri, timeoutms)
self.db = self.mongo_client[db]
self.broker = self.db.get_collection(collection)
if len(self.broker.index_information()) == 0:
# collection does not exist
# create it and create indexes
self.db.create_collection(collection)
self.broker.create_index( "instance_id" )
self.broker.create_index( "binding_id" )
print("mongo: connected")
except Exception as e:
print("mongo: " + str(e))
self.mongo_client = None
raise ErrStorageMongoConnection("Initialization")
def populate(self, obj):
""" Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False
def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore()
def remove(self, obj):
""" Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
"""
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj))
def remove_instance(self, instance):
""" Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance.
"""
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id)
def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id)
|
mickybart/python-atlasbroker | atlasbroker/storage.py | AtlasBrokerStorage.remove_instance | python | def remove_instance(self, instance):
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id) | Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance. | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L165-L191 | null | class AtlasBrokerStorage:
""" Storage
Permit to store ServiceInstance and ServiceBinding into a MongoDB.
This is used for caching and to trace what is done by the broker.
This is internally used to don't create same instances/bindings and to return appropriate code like AlreadyExists
That reducing the number of call to Atlas APIs too.
Constructor
Args:
uri (str): MongoDB connection string
timeoutms (int): MongoDB requests timeout in ms
db (str): The DB name
collection (str): The collection name
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
"""
def __init__(self, uri, timeoutms, db, collection):
self.mongo_client = None
# Connect to Mongo
try:
print("connection to mongo...")
# Init Mongo and create DB and collections objects
self.mongo_client = pymongo.MongoClient(uri, timeoutms)
self.db = self.mongo_client[db]
self.broker = self.db.get_collection(collection)
if len(self.broker.index_information()) == 0:
# collection does not exist
# create it and create indexes
self.db.create_collection(collection)
self.broker.create_index( "instance_id" )
self.broker.create_index( "binding_id" )
print("mongo: connected")
except Exception as e:
print("mongo: " + str(e))
self.mongo_client = None
raise ErrStorageMongoConnection("Initialization")
def populate(self, obj):
""" Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False
def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore()
def remove(self, obj):
""" Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
"""
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj))
def remove_instance(self, instance):
""" Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance.
"""
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id)
def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id)
|
mickybart/python-atlasbroker | atlasbroker/storage.py | AtlasBrokerStorage.remove_binding | python | def remove_binding(self, binding):
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id) | Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding | train | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/storage.py#L193-L219 | null | class AtlasBrokerStorage:
""" Storage
Permit to store ServiceInstance and ServiceBinding into a MongoDB.
This is used for caching and to trace what is done by the broker.
This is internally used to don't create same instances/bindings and to return appropriate code like AlreadyExists
That reducing the number of call to Atlas APIs too.
Constructor
Args:
uri (str): MongoDB connection string
timeoutms (int): MongoDB requests timeout in ms
db (str): The DB name
collection (str): The collection name
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
"""
def __init__(self, uri, timeoutms, db, collection):
self.mongo_client = None
# Connect to Mongo
try:
print("connection to mongo...")
# Init Mongo and create DB and collections objects
self.mongo_client = pymongo.MongoClient(uri, timeoutms)
self.db = self.mongo_client[db]
self.broker = self.db.get_collection(collection)
if len(self.broker.index_information()) == 0:
# collection does not exist
# create it and create indexes
self.db.create_collection(collection)
self.broker.create_index( "instance_id" )
self.broker.create_index( "binding_id" )
print("mongo: connected")
except Exception as e:
print("mongo: " + str(e))
self.mongo_client = None
raise ErrStorageMongoConnection("Initialization")
def populate(self, obj):
""" Populate
Query mongo to get information about the obj if it exists
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageMongoConnection: Error during MongoDB communication.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "binding_id" : { "$exists" : False } }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "instance_id" : obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# find
try:
result = self.broker.find_one(query)
except:
raise ErrStorageMongoConnection("Populate Instance or Binding")
if result is not None:
obj.parameters = result["parameters"]
# Flags the obj to provisioned
obj.provisioned = True
else:
# New
obj.provisioned = False
def store(self, obj):
""" Store
Store an object into the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Returns:
ObjectId: MongoDB _id
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageTypeUnsupported: Type unsupported.
ErrStorageStore : Failed to store the binding or instance.
"""
# query
if type(obj) is AtlasServiceInstance.Instance:
query = { "instance_id" : obj.instance_id, "database" : obj.get_dbname(), "cluster": obj.get_cluster(), "parameters" : obj.parameters }
elif type(obj) is AtlasServiceBinding.Binding:
query = { "binding_id" : obj.binding_id, "parameters" : obj.parameters, "instance_id": obj.instance.instance_id }
else:
raise ErrStorageTypeUnsupported(type(obj))
# insert
try:
result = self.broker.insert_one(query)
except:
raise ErrStorageMongoConnection("Store Instance or Binding")
if result is not None:
# Flags the obj to provisioned
obj.provisioned = True
return result.inserted_id
raise ErrStorageStore()
def remove(self, obj):
""" Remove
Remove an object from the MongoDB storage for caching
Args:
obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding
Raises:
ErrStorageTypeUnsupported: Type unsupported.
"""
if type(obj) is AtlasServiceInstance.Instance:
self.remove_instance(obj)
elif type(obj) is AtlasServiceBinding.Binding:
self.remove_binding(obj)
else:
raise ErrStorageTypeUnsupported(type(obj))
def remove_instance(self, instance):
""" Remove an instance
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance.
"""
# query
query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
# delete the instance
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Instance")
# return the result
if result is not None and result.deleted_count == 1:
instance.provisioned = False
else:
raise ErrStorageRemoveInstance(instance.instance_id)
def remove_binding(self, binding):
""" Remove a binding
Remove an object from the MongoDB storage for caching
Args:
binding (AtlasServiceBinding.Binding): binding
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveBinding: Failed to remove the binding
"""
# query
query = { "binding_id" : binding.binding_id, "instance_id" : binding.instance.instance_id }
# delete the binding
try:
result = self.broker.delete_one(query)
except:
raise ErrStorageMongoConnection("Remove Binding")
# return the result
if result is not None and result.deleted_count == 1:
binding.provisioned = False
else:
raise ErrStorageRemoveBinding(binding.binding_id)
|
vicalloy/lbutils | lbutils/qs.py | get_sum | python | def get_sum(qs, field):
sum_field = '%s__sum' % field
qty = qs.aggregate(Sum(field))[sum_field]
return qty if qty else 0 | get sum for queryset.
``qs``: queryset
``field``: The field name to sum. | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/qs.py#L28-L37 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import six
from django.db.models import Q, F
from django.db.models import Sum
from django.db.models import Max
__all__ = (
'get_or_none', 'get_pk_or_none', 'get_sum',
'get_max', 'do_filter',
)
def get_or_none(model_class, *args, **kwargs):
try:
return model_class.objects.get(*args, **kwargs)
except Exception:
return None
def get_pk_or_none(model_class, *args, **kwargs):
obj = get_or_none(model_class, *args, **kwargs)
return obj.pk if obj else None
def get_max(qs, field):
"""
get max for queryset.
qs: queryset
field: The field name to max.
"""
max_field = '%s__max' % field
num = qs.aggregate(Max(field))[max_field]
return num if num else 0
def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]):
"""
auto filter queryset by dict.
qs: queryset need to filter.
qdata:
quick_query_fields:
int_quick_query_fields:
"""
try:
qs = qs.filter(
__gen_quick_query_params(
qdata.get('q_quick_search_kw'), quick_query_fields,
int_quick_query_fields)
)
q, kw_query_params = __gen_query_params(qdata)
qs = qs.filter(q, **kw_query_params)
except:
import traceback
traceback.print_exc()
return qs
def __gen_quick_query_params(value, fields, int_fields):
q = Q()
if not value:
return q
for field in fields:
d = {"%s__icontains" % field: value}
q = q | Q(**d)
if value.isdigit():
for f in int_fields:
d = {f: value}
q = q | Q(**d)
return q
def __gen_query_params(qdata):
q = Q()
kw_query_params = {}
for k, v in qdata.items():
if k.startswith('q__'):
k = k[3:]
if not isinstance(v, six.text_type):
if v is not None:
kw_query_params[k] = v
continue
if v == '':
continue
v = v.replace(',', ',')
if v.startswith('F__'):
v = F(v[3:])
elif k.endswith('__in'):
v = [e for e in v.split(',') if e]
elif ',' in v:
tmp_q = Q()
v = [e for e in v.split(',') if e]
for o in v:
tmp_q = tmp_q | Q(**{k: o})
q = q & tmp_q
continue
if isinstance(v, six.text_type):
v = {'__True': True, '__False': False}.get(v, v)
kw_query_params[k] = v
return q, kw_query_params
|
vicalloy/lbutils | lbutils/qs.py | get_max | python | def get_max(qs, field):
max_field = '%s__max' % field
num = qs.aggregate(Max(field))[max_field]
return num if num else 0 | get max for queryset.
qs: queryset
field: The field name to max. | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/qs.py#L40-L49 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import six
from django.db.models import Q, F
from django.db.models import Sum
from django.db.models import Max
__all__ = (
'get_or_none', 'get_pk_or_none', 'get_sum',
'get_max', 'do_filter',
)
def get_or_none(model_class, *args, **kwargs):
try:
return model_class.objects.get(*args, **kwargs)
except Exception:
return None
def get_pk_or_none(model_class, *args, **kwargs):
obj = get_or_none(model_class, *args, **kwargs)
return obj.pk if obj else None
def get_sum(qs, field):
"""
get sum for queryset.
``qs``: queryset
``field``: The field name to sum.
"""
sum_field = '%s__sum' % field
qty = qs.aggregate(Sum(field))[sum_field]
return qty if qty else 0
def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]):
"""
auto filter queryset by dict.
qs: queryset need to filter.
qdata:
quick_query_fields:
int_quick_query_fields:
"""
try:
qs = qs.filter(
__gen_quick_query_params(
qdata.get('q_quick_search_kw'), quick_query_fields,
int_quick_query_fields)
)
q, kw_query_params = __gen_query_params(qdata)
qs = qs.filter(q, **kw_query_params)
except:
import traceback
traceback.print_exc()
return qs
def __gen_quick_query_params(value, fields, int_fields):
q = Q()
if not value:
return q
for field in fields:
d = {"%s__icontains" % field: value}
q = q | Q(**d)
if value.isdigit():
for f in int_fields:
d = {f: value}
q = q | Q(**d)
return q
def __gen_query_params(qdata):
q = Q()
kw_query_params = {}
for k, v in qdata.items():
if k.startswith('q__'):
k = k[3:]
if not isinstance(v, six.text_type):
if v is not None:
kw_query_params[k] = v
continue
if v == '':
continue
v = v.replace(',', ',')
if v.startswith('F__'):
v = F(v[3:])
elif k.endswith('__in'):
v = [e for e in v.split(',') if e]
elif ',' in v:
tmp_q = Q()
v = [e for e in v.split(',') if e]
for o in v:
tmp_q = tmp_q | Q(**{k: o})
q = q & tmp_q
continue
if isinstance(v, six.text_type):
v = {'__True': True, '__False': False}.get(v, v)
kw_query_params[k] = v
return q, kw_query_params
|
vicalloy/lbutils | lbutils/qs.py | do_filter | python | def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]):
try:
qs = qs.filter(
__gen_quick_query_params(
qdata.get('q_quick_search_kw'), quick_query_fields,
int_quick_query_fields)
)
q, kw_query_params = __gen_query_params(qdata)
qs = qs.filter(q, **kw_query_params)
except:
import traceback
traceback.print_exc()
return qs | auto filter queryset by dict.
qs: queryset need to filter.
qdata:
quick_query_fields:
int_quick_query_fields: | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/qs.py#L52-L72 | [
"def __gen_quick_query_params(value, fields, int_fields):\n q = Q()\n if not value:\n return q\n for field in fields:\n d = {\"%s__icontains\" % field: value}\n q = q | Q(**d)\n if value.isdigit():\n for f in int_fields:\n d = {f: value}\n q = q | Q(**d)\n return q\n",
"def __gen_query_params(qdata):\n q = Q()\n kw_query_params = {}\n for k, v in qdata.items():\n if k.startswith('q__'):\n k = k[3:]\n if not isinstance(v, six.text_type):\n if v is not None:\n kw_query_params[k] = v\n continue\n if v == '':\n continue\n v = v.replace(',', ',')\n if v.startswith('F__'):\n v = F(v[3:])\n elif k.endswith('__in'):\n v = [e for e in v.split(',') if e]\n elif ',' in v:\n tmp_q = Q()\n v = [e for e in v.split(',') if e]\n for o in v:\n tmp_q = tmp_q | Q(**{k: o})\n q = q & tmp_q\n continue\n if isinstance(v, six.text_type):\n v = {'__True': True, '__False': False}.get(v, v)\n kw_query_params[k] = v\n return q, kw_query_params\n"
] | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import six
from django.db.models import Q, F
from django.db.models import Sum
from django.db.models import Max
__all__ = (
'get_or_none', 'get_pk_or_none', 'get_sum',
'get_max', 'do_filter',
)
def get_or_none(model_class, *args, **kwargs):
try:
return model_class.objects.get(*args, **kwargs)
except Exception:
return None
def get_pk_or_none(model_class, *args, **kwargs):
obj = get_or_none(model_class, *args, **kwargs)
return obj.pk if obj else None
def get_sum(qs, field):
"""
get sum for queryset.
``qs``: queryset
``field``: The field name to sum.
"""
sum_field = '%s__sum' % field
qty = qs.aggregate(Sum(field))[sum_field]
return qty if qty else 0
def get_max(qs, field):
"""
get max for queryset.
qs: queryset
field: The field name to max.
"""
max_field = '%s__max' % field
num = qs.aggregate(Max(field))[max_field]
return num if num else 0
def __gen_quick_query_params(value, fields, int_fields):
q = Q()
if not value:
return q
for field in fields:
d = {"%s__icontains" % field: value}
q = q | Q(**d)
if value.isdigit():
for f in int_fields:
d = {f: value}
q = q | Q(**d)
return q
def __gen_query_params(qdata):
q = Q()
kw_query_params = {}
for k, v in qdata.items():
if k.startswith('q__'):
k = k[3:]
if not isinstance(v, six.text_type):
if v is not None:
kw_query_params[k] = v
continue
if v == '':
continue
v = v.replace(',', ',')
if v.startswith('F__'):
v = F(v[3:])
elif k.endswith('__in'):
v = [e for e in v.split(',') if e]
elif ',' in v:
tmp_q = Q()
v = [e for e in v.split(',') if e]
for o in v:
tmp_q = tmp_q | Q(**{k: o})
q = q & tmp_q
continue
if isinstance(v, six.text_type):
v = {'__True': True, '__False': False}.get(v, v)
kw_query_params[k] = v
return q, kw_query_params
|
vicalloy/lbutils | lbutils/dateutils.py | fmt_datetime | python | def fmt_datetime(d, fmt='', local=True):
if not d:
return ''
if local:
from django.templatetags.tz import localtime
d = localtime(d)
if not fmt:
fmt = '%Y-%m-%d %H-%M'
return d.strftime(fmt) | format date with local support
``d``: datetime to format
``fmt``: format, default is '%Y-%m-%d %H-%M'
``local``: format as local time | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/dateutils.py#L51-L65 | null | from __future__ import unicode_literals
from datetime import datetime
__all__ = (
'get_month_choices', 'MONTH_CHOICES', 'fmt_month',
'get_year_choices', 'fmt_datetime',
)
def get_month_choices(blank_label='------'):
choices = [
('01', '01'),
('02', '02'),
('03', '03'),
('04', '04'),
('05', '05'),
('06', '06'),
('07', '07'),
('08', '08'),
('09', '09'),
('10', '10'),
('11', '11'),
('12', '12'),
]
if blank_label:
choices.insert(0, ('', blank_label))
return choices
MONTH_CHOICES = get_month_choices(None)
def fmt_month(month):
""" format month(1 to 01) """
if not month:
return month
return ("%s" % month).zfill(2)
def get_year_choices(start_year=-5, end_year=2, blank_label=''):
year = datetime.today().year
years = range(year + start_year, year + end_year)
choices = [(e, e) for e in years]
if blank_label:
choices.insert(0, ('', blank_label))
return choices
|
vicalloy/lbutils | lbutils/templatetags/lbutils.py | get_setting | python | def get_setting(context, key, default_val="", as_key=None):
if ("%s" % default_val).startswith('$.'):
default_val = getattr(settings, default_val[2:])
val = getattr(settings, key, default_val)
if not as_key:
return val
context[as_key] = val
return '' | get val form settings and set to context
{% load lbutils %}
{% get_setting "key" default_val "as_key" %}
{{ as_key }}
if as_key is None, this tag will return val | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/templatetags/lbutils.py#L12-L26 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag(takes_context=True)
@register.filter(name='boolean_icon')
def boolean_icon(v):
if v:
return mark_safe('<i class="fa fa-fw fa-check-circle"/>')
return ''
@register.filter(name='display_array')
def display_array(objs):
if not objs:
return ''
return ', '.join(['%s' % e for e in objs])
@register.simple_tag
def getvars(request, excludes):
getvars = request.GET.copy()
excludes = excludes.split(',')
for p in excludes:
if p in getvars:
del getvars[p]
if len(getvars.keys()) > 0:
return "&%s" % getvars.urlencode()
else:
return ''
|
vicalloy/lbutils | lbutils/utils.py | safe_eval | python | def safe_eval(source, *args, **kwargs):
source = source.replace('import', '') # import is not allowed
return eval(source, *args, **kwargs) | eval without import | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/utils.py#L15-L18 | null | from __future__ import unicode_literals
import math
import importlib
from django.template.defaultfilters import floatformat
from django.contrib.humanize.templatetags.humanize import intcomma
__all__ = (
'safe_eval', 'fmt_num', 'as_callable', 'create_instance',
'format_filesize',
)
def fmt_num(num, zero_num=None):
""" humanize number(9000 to 9,000) """
if zero_num is not None:
num = floatformat(num, zero_num)
return intcomma(num, False)
def as_callable(callable_path):
if callable(callable_path):
return callable_path
idx = callable_path.rindex(r'.')
_module = importlib.import_module(callable_path[:idx])
return getattr(_module, callable_path[idx + 1:])
def create_instance(class_name, *args, **kwargs):
"""
create class instance
class_name: name of class i.e.: "django.http.HttpResponse"
*args, **kwargs: param for class
"""
return as_callable(class_name)(*args, **kwargs)
def format_filesize(size):
if (size < 1024):
return '%s B' % size
size = size / 1024.0
size_name = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
return '%s %s' % (s, size_name[i])
|
vicalloy/lbutils | lbutils/utils.py | fmt_num | python | def fmt_num(num, zero_num=None):
if zero_num is not None:
num = floatformat(num, zero_num)
return intcomma(num, False) | humanize number(9000 to 9,000) | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/utils.py#L21-L25 | null | from __future__ import unicode_literals
import math
import importlib
from django.template.defaultfilters import floatformat
from django.contrib.humanize.templatetags.humanize import intcomma
__all__ = (
'safe_eval', 'fmt_num', 'as_callable', 'create_instance',
'format_filesize',
)
def safe_eval(source, *args, **kwargs):
""" eval without import """
source = source.replace('import', '') # import is not allowed
return eval(source, *args, **kwargs)
def as_callable(callable_path):
if callable(callable_path):
return callable_path
idx = callable_path.rindex(r'.')
_module = importlib.import_module(callable_path[:idx])
return getattr(_module, callable_path[idx + 1:])
def create_instance(class_name, *args, **kwargs):
"""
create class instance
class_name: name of class i.e.: "django.http.HttpResponse"
*args, **kwargs: param for class
"""
return as_callable(class_name)(*args, **kwargs)
def format_filesize(size):
if (size < 1024):
return '%s B' % size
size = size / 1024.0
size_name = ("KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size, 1024)))
p = math.pow(1024, i)
s = round(size / p, 2)
return '%s %s' % (s, size_name[i])
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.errors_as_text | python | def errors_as_text(self):
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors) | only available to Django 1.7+ | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L32-L44 | null | class FormHelperMixin(object):
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
"""
add attr to fields
"""
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
"""
add class to html widgets.
"""
self.add_attr2fields('class', html_class, fields, exclude)
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
"""
filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds
"""
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret
def as_required_fields(self, fields=[]):
""" set required to True """
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg)
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.add_attr2fields | python | def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val | add attr to fields | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L65-L72 | [
"def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):\n \"\"\"\n filter fields\n\n fields:\n exclude:\n include_all_if_empty: if fields is empty return all fields\n\n return: fileds\n \"\"\"\n if not include_all_if_empty and not fields:\n return []\n ret = []\n for f in self.visible_fields():\n if fields and f.name not in fields:\n continue\n if exclude and f.name in exclude:\n continue\n ret.append(f)\n return ret\n"
] | class FormHelperMixin(object):
def errors_as_text(self):
"""
only available to Django 1.7+
"""
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors)
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
"""
add class to html widgets.
"""
self.add_attr2fields('class', html_class, fields, exclude)
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
"""
filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds
"""
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret
def as_required_fields(self, fields=[]):
""" set required to True """
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg)
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.add_class2fields | python | def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields('class', html_class, fields, exclude) | add class to html widgets. | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L74-L78 | [
"def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):\n \"\"\"\n add attr to fields\n \"\"\"\n for f in self.filter_fields(fields, exclude, include_all_if_empty):\n f = self.fields[f.name]\n org_val = f.widget.attrs.get(attr_name, '')\n f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val\n"
] | class FormHelperMixin(object):
def errors_as_text(self):
"""
only available to Django 1.7+
"""
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors)
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
"""
add attr to fields
"""
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
"""
filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds
"""
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret
def as_required_fields(self, fields=[]):
""" set required to True """
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg)
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.filter_fields | python | def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret | filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L80-L99 | null | class FormHelperMixin(object):
def errors_as_text(self):
"""
only available to Django 1.7+
"""
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors)
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
"""
add attr to fields
"""
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
"""
add class to html widgets.
"""
self.add_attr2fields('class', html_class, fields, exclude)
def as_required_fields(self, fields=[]):
""" set required to True """
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg)
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.as_required_fields | python | def as_required_fields(self, fields=[]):
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True | set required to True | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L101-L106 | [
"def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):\n \"\"\"\n filter fields\n\n fields:\n exclude:\n include_all_if_empty: if fields is empty return all fields\n\n return: fileds\n \"\"\"\n if not include_all_if_empty and not fields:\n return []\n ret = []\n for f in self.visible_fields():\n if fields and f.name not in fields:\n continue\n if exclude and f.name in exclude:\n continue\n ret.append(f)\n return ret\n"
] | class FormHelperMixin(object):
def errors_as_text(self):
"""
only available to Django 1.7+
"""
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors)
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
"""
add attr to fields
"""
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
"""
add class to html widgets.
"""
self.add_attr2fields('class', html_class, fields, exclude)
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
"""
filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds
"""
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret
def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
""" check if this object is unique """
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg)
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/forms.py | FormHelperMixin.check_uniqe | python | def check_uniqe(self, obj_class, error_msg=_('Must be unique'), **kwargs):
if obj_class.objects.filter(**kwargs).exclude(pk=self.instance.pk):
raise forms.ValidationError(error_msg) | check if this object is unique | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/forms.py#L108-L111 | null | class FormHelperMixin(object):
def errors_as_text(self):
"""
only available to Django 1.7+
"""
errors = []
errors.append(self.non_field_errors().as_text())
errors_data = self.errors.as_data()
for key, value in errors_data.items():
field_label = self.fields[key].label
err_descn = ''.join([force_text(e.message) for e in value])
error = "%s %s" % (field_label, err_descn)
errors.append(error)
return ','.join(errors)
def as_readonly_fields(self, fields=[], exclude=[], include_all_if_empty=True):
self.add_attr2fields(
'readonly', 'readonly',
fields=fields, exclude=exclude, include_all_if_empty=True)
def as_text_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.widget = TextWidget(src_widget=f.widget)
def as_hidden_fields(self, fields=[], exclude=[], include_all_if_empty=True):
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
f.required = False
if isinstance(f.widget, (forms.SelectMultiple, JustSelectedSelectMultiple)):
f.widget = forms.MultipleHiddenInput()
else:
f.widget = forms.HiddenInput()
def add_attr2fields(self, attr_name, attr_val, fields=[], exclude=[], include_all_if_empty=True):
"""
add attr to fields
"""
for f in self.filter_fields(fields, exclude, include_all_if_empty):
f = self.fields[f.name]
org_val = f.widget.attrs.get(attr_name, '')
f.widget.attrs[attr_name] = '%s %s' % (org_val, attr_val) if org_val else attr_val
def add_class2fields(self, html_class, fields=[], exclude=[], include_all_if_empty=True):
"""
add class to html widgets.
"""
self.add_attr2fields('class', html_class, fields, exclude)
def filter_fields(self, fields=[], exclude=[], include_all_if_empty=True):
"""
filter fields
fields:
exclude:
include_all_if_empty: if fields is empty return all fields
return: fileds
"""
if not include_all_if_empty and not fields:
return []
ret = []
for f in self.visible_fields():
if fields and f.name not in fields:
continue
if exclude and f.name in exclude:
continue
ret.append(f)
return ret
def as_required_fields(self, fields=[]):
""" set required to True """
fields = self.filter_fields(fields)
for f in fields:
f = self.fields[f.name]
f.required = True
def row_div(self, fnames, span=4):
fields = self.filter_fields(fnames)
fnames = [e.name for e in fields if not e.is_hidden]
return row_div(fnames, span=span)
|
vicalloy/lbutils | lbutils/xlsxutils.py | export_xlsx | python | def export_xlsx(wb, output, fn):
wb.close()
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.ms-excel")
cd = codecs.encode('attachment;filename=%s' % fn, 'utf-8')
response['Content-Disposition'] = cd
return response | export as excel
wb:
output:
fn: file name | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/xlsxutils.py#L20-L32 | null | from __future__ import unicode_literals
import codecs
from datetime import datetime
from django.http import HttpResponse
from django.utils.six import BytesIO
try:
import xlsxwriter as xlwt
except ImportError:
pass
__all__ = (
'export_xlsx', 'xlsw_write_row', 'simple_export2xlsx',
)
def xlsw_write_row(ws, row_idx, row, fmt=None):
    """Write ``row`` into worksheet row ``row_idx`` and return the next row index.

    :param ws: xlsxwriter worksheet
    :param row_idx: zero-based row number to write into
    :param row: list of cell values
    :param fmt: optional cell format applied to every cell
    :return: ``row_idx + 1``
    """
    for col_idx, cell_value in enumerate(row):
        ws.write(row_idx, col_idx, cell_value, fmt)
    return row_idx + 1
def simple_export2xlsx(filename, titles, qs, func_data):
    """Export ``qs`` to an xlsx HTTP response.

    :param filename: base name for the worksheet and downloaded file
    :param titles: header row labels
    :param qs: queryset (or any iterable) to export
    :param func_data: callable mapping an object to a row list,
        e.g. ``lambda o: [o.pk, o.name]``
    :return: ``HttpResponse`` with the workbook attached
    """
    output = BytesIO()
    wb = xlwt.Workbook(output)
    ws = wb.add_worksheet(filename)
    header_fmt = wb.add_format()
    header_fmt.set_bg_color('#C4D89E')
    row_idx = xlsw_write_row(ws, 0, titles, header_fmt)
    for o in qs:
        row_idx = xlsw_write_row(ws, row_idx, func_data(o))
    # Bug fix: str(datetime.now()) contains spaces and colons, which produce
    # a malformed unquoted Content-Disposition filename; use a compact stamp.
    stamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    fn = '%s-%s.xlsx' % (filename, stamp)
    return export_xlsx(wb, output, fn)
|
vicalloy/lbutils | lbutils/xlsxutils.py | xlsw_write_row | python | def xlsw_write_row(ws, row_idx, row, fmt=None):
for col_idx in range(len(row)):
ws.write(row_idx, col_idx, row[col_idx], fmt)
row_idx += 1
return row_idx | ws:
row_idx: row number
row: a list, data to write
fmt: format for cell | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/xlsxutils.py#L35-L45 | null | from __future__ import unicode_literals
import codecs
from datetime import datetime
from django.http import HttpResponse
from django.utils.six import BytesIO
try:
import xlsxwriter as xlwt
except ImportError:
pass
__all__ = (
'export_xlsx', 'xlsw_write_row', 'simple_export2xlsx',
)
def export_xlsx(wb, output, fn):
    """Finalize workbook ``wb`` and wrap its bytes in a download response.

    :param wb: xlsxwriter workbook backed by ``output``
    :param output: in-memory buffer the workbook was written to
    :param fn: file name offered to the browser
    :return: ``HttpResponse`` with an attachment Content-Disposition header
    """
    wb.close()
    output.seek(0)
    response = HttpResponse(output.read(), content_type="application/vnd.ms-excel")
    # codecs.encode returns a UTF-8 *bytes* header value.
    # NOTE(review): the filename is unquoted -- names containing spaces or
    # semicolons would yield a malformed header; confirm callers' inputs.
    cd = codecs.encode('attachment;filename=%s' % fn, 'utf-8')
    response['Content-Disposition'] = cd
    return response
def simple_export2xlsx(filename, titles, qs, func_data):
"""
export as excel
filename: file name
titles: title for this table
qs: queryset to export
func_data: a function to format object to list. ex: `lambda o: [o.pk, o.name]`
"""
output = BytesIO()
wb = xlwt.Workbook(output)
ws = wb.add_worksheet(filename)
header_fmt = wb.add_format()
header_fmt.set_bg_color('#C4D89E')
row_idx = 0
row_idx = xlsw_write_row(ws, row_idx, titles, header_fmt)
for o in qs:
row_idx = xlsw_write_row(ws, row_idx, func_data(o))
fn = '%s-%s.xlsx' % (filename, datetime.now())
return export_xlsx(wb, output, fn)
|
vicalloy/lbutils | lbutils/xlsxutils.py | simple_export2xlsx | python | def simple_export2xlsx(filename, titles, qs, func_data):
output = BytesIO()
wb = xlwt.Workbook(output)
ws = wb.add_worksheet(filename)
header_fmt = wb.add_format()
header_fmt.set_bg_color('#C4D89E')
row_idx = 0
row_idx = xlsw_write_row(ws, row_idx, titles, header_fmt)
for o in qs:
row_idx = xlsw_write_row(ws, row_idx, func_data(o))
fn = '%s-%s.xlsx' % (filename, datetime.now())
return export_xlsx(wb, output, fn) | export as excel
filename: file name
titles: title for this table
qs: queryset to export
func_data: a function to format object to list. ex: `lambda o: [o.pk, o.name]` | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/xlsxutils.py#L48-L66 | [
"def export_xlsx(wb, output, fn):\n \"\"\"\n export as excel\n wb:\n output:\n fn: file name\n \"\"\"\n wb.close()\n output.seek(0)\n response = HttpResponse(output.read(), content_type=\"application/vnd.ms-excel\")\n cd = codecs.encode('attachment;filename=%s' % fn, 'utf-8')\n response['Content-Disposition'] = cd\n return response\n",
"def xlsw_write_row(ws, row_idx, row, fmt=None):\n \"\"\"\n ws:\n row_idx: row number\n row: a list, data to write\n fmt: format for cell\n \"\"\"\n for col_idx in range(len(row)):\n ws.write(row_idx, col_idx, row[col_idx], fmt)\n row_idx += 1\n return row_idx\n",
"def func_data(o):\n return [\n o.name,\n o.price\n ]\n"
] | from __future__ import unicode_literals
import codecs
from datetime import datetime
from django.http import HttpResponse
from django.utils.six import BytesIO
try:
import xlsxwriter as xlwt
except ImportError:
pass
__all__ = (
'export_xlsx', 'xlsw_write_row', 'simple_export2xlsx',
)
def export_xlsx(wb, output, fn):
"""
export as excel
wb:
output:
fn: file name
"""
wb.close()
output.seek(0)
response = HttpResponse(output.read(), content_type="application/vnd.ms-excel")
cd = codecs.encode('attachment;filename=%s' % fn, 'utf-8')
response['Content-Disposition'] = cd
return response
def xlsw_write_row(ws, row_idx, row, fmt=None):
"""
ws:
row_idx: row number
row: a list, data to write
fmt: format for cell
"""
for col_idx in range(len(row)):
ws.write(row_idx, col_idx, row[col_idx], fmt)
row_idx += 1
return row_idx
|
vicalloy/lbutils | lbutils/widgets.py | render_hidden | python | def render_hidden(name, value):
if isinstance(value, list):
return MultipleHiddenInput().render(name, value)
return HiddenInput().render(name, value) | render as hidden widget | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/widgets.py#L71-L75 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
import django
from django.utils.encoding import force_text
from django.forms.models import ModelChoiceIterator
from django.utils.html import escape
try:
from django.forms.util import flatatt
except ImportError: # Django >= 1.9
from django.forms.utils import flatatt
from django.forms import Select
from django.forms import SelectMultiple
from django.forms import MultipleHiddenInput, HiddenInput
from django.forms.widgets import Widget, Textarea, CheckboxInput
__all__ = (
'JustSelectedSelect', 'JustSelectedSelectMultiple', 'TextWidget',
)
class JustSelectedMixin(object):
    """Mixin that restricts a choice widget to its currently selected options."""

    def render_readonly(self, name, value, attrs):
        """Return the display label of the option matching ``value`` ('' if none)."""
        candidates = self.choices
        if isinstance(candidates, ModelChoiceIterator):
            # Narrow the queryset so only the chosen row(s) are iterated.
            if isinstance(value, list):
                candidates.queryset = candidates.queryset.filter(pk__in=value)
            else:
                candidates.queryset = candidates.queryset.filter(pk=value)
        for option in candidates:
            if "%s" % option[0] == "%s" % value:
                return option[1]
        return ""

    def get_only_selected_choices(self, value):
        """Return ``self.choices`` narrowed down to the options listed in ``value``."""
        candidates = self.choices
        picked = set(force_text(v) for v in value if v)
        if isinstance(candidates, ModelChoiceIterator):
            candidates.queryset = candidates.queryset.filter(pk__in=picked)
        else:
            candidates = [c for c in candidates if force_text(c) in picked]
        return candidates
class JustSelectedSelect(JustSelectedMixin, Select):
    """Select that renders only the currently selected option."""

    def optgroups(self, name, value, attrs=None):
        narrowed = self.get_only_selected_choices(value)
        self.choices = narrowed
        return super(JustSelectedSelect, self).optgroups(name, value, attrs)

    def render_options(self, selected_choices):
        narrowed = self.get_only_selected_choices(selected_choices)
        self.choices = narrowed
        return super(JustSelectedSelect, self).render_options(selected_choices)
class JustSelectedSelectMultiple(JustSelectedMixin, SelectMultiple):
    """Multi-select that renders only the currently selected options."""

    def optgroups(self, name, value, attrs=None):
        narrowed = self.get_only_selected_choices(value)
        self.choices = narrowed
        return super(JustSelectedSelectMultiple, self).optgroups(name, value, attrs)

    def render_options(self, selected_choices):
        narrowed = self.get_only_selected_choices(selected_choices)
        self.choices = narrowed
        return super(JustSelectedSelectMultiple, self).render_options(selected_choices)
class TextWidget(Widget):
    """Render a field read-only: display text plus a hidden input carrying the value."""

    def __init__(self, attrs=None, src_widget=None):
        # src_widget: the widget this one replaces; used to pick a display format.
        super(TextWidget, self).__init__(attrs)
        self.src_widget = src_widget

    def gen_output(self, descn, value, name):
        # Visible label plus a hidden input so the value still round-trips on submit.
        return "<span class='text-value'>%s</span> %s" % (descn, render_hidden(name, value))

    def value_from_datadict(self, data, files, name):
        # Delegate parsing to the wrapped widget so values decode identically.
        return self.src_widget.value_from_datadict(data, files, name)

    def render(self, name, value, attrs=None):
        # Prefer the wrapped widget's own read-only renderer when it has one
        # (e.g. the JustSelected* widgets in this module).
        func_render_readonly = getattr(self.src_widget, 'render_readonly', None)
        if func_render_readonly:
            descn = func_render_readonly(name, value, attrs)
            return self.gen_output(descn, value, name)
        if isinstance(self.src_widget, Select):
            # Normalize to a list of strings so single and multi selects compare alike.
            if isinstance(value, list):
                values = ["%s" % e for e in value]
            else:
                values = ["%s" % value]
            descns = []
            for v, descn in self.src_widget.choices:
                if "%s" % v in values:
                    descns.append(descn)
            return self.gen_output(','.join(descns), value, name)
        if isinstance(self.src_widget, CheckboxInput):
            descn = u'√' if value else u'×'
            return self.gen_output(descn, value, name)
        descn = '' if value is None else '%s' % value
        # TODO if in pre element
        if isinstance(self.src_widget, Textarea):
            # NOTE(review): a None value reaches gen_output unchanged here and
            # would render literally as 'None' -- confirm intended behaviour.
            descn = value
        return self.gen_output(descn, value, name)
|
vicalloy/lbutils | lbutils/widgets.py | JustSelectedMixin.get_only_selected_choices | python | def get_only_selected_choices(self, value):
schoices = self.choices
selected_choices = set([force_text(v) for v in value if v])
if isinstance(schoices, ModelChoiceIterator):
schoices.queryset = schoices.queryset.filter(pk__in=selected_choices)
else:
schoices = [e for e in schoices if force_text(e) in selected_choices]
return schoices | Return a list of optgroups for this widget. | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/widgets.py#L38-L46 | null | class JustSelectedMixin(object):
""" only generate selected option """
def render_readonly(self, name, value, attrs):
schoices = self.choices
if isinstance(schoices, ModelChoiceIterator):
if isinstance(value, list):
schoices.queryset = schoices.queryset.filter(pk__in=value)
else:
schoices.queryset = schoices.queryset.filter(pk=value)
for o in schoices:
if "%s" % o[0] == "%s" % value:
return o[1]
return ""
|
vicalloy/lbutils | lbutils/views.py | qdict_get_list | python | def qdict_get_list(qdict, k):
pks = qdict.getlist(k)
return [e for e in pks if e] | get list from QueryDict and remove blank date from list. | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/views.py#L20-L25 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse
try:
from crispy_forms.helper import FormHelper
except ImportError:
pass
__all__ = (
'qdict_get_list', 'request_get_next', 'save_formset',
'render_json', 'has_any_perm', 'get_post_data',
'get_client_ip', 'create_formset', 'forms_is_valid',
)
def request_get_next(request, default_next):
    """Resolve the redirect target for ``request``.

    Lookup order: POST ``next``, GET ``next``, the Referer header,
    then ``default_next``.
    """
    next_url = request.POST.get('next')
    if not next_url:
        next_url = request.GET.get('next')
    if not next_url:
        next_url = request.META.get('HTTP_REFERER')
    if not next_url:
        next_url = default_next
    return next_url
def save_formset(formset, ext_vals):
    """Save a formset, stamping every saved instance with ``ext_vals``.

    :param formset: a bound, validated formset
    :param ext_vals: mapping of attribute name -> value applied to each
        instance before it is saved
    """
    formset.save(commit=False)
    for form in formset.saved_forms:
        instance = form.save(commit=False)
        for attr, val in ext_vals.items():
            setattr(instance, attr, val)
        instance.save()
def render_json(data, ensure_ascii=False, request=None, as_text=False):
    """Serialize ``data`` to JSON and wrap it in an ``HttpResponse``.

    :param data: JSON-serializable payload
    :param ensure_ascii: passed through to ``json.dumps``
    :param request: when given, ``?fmt=jsonp`` switches to a JSONP body using
        the ``callback`` query parameter (default ``callback``)
    :param as_text: serve as ``text/html`` instead of ``application/json``
    """
    fmt = 'json'
    content_type = "text/html" if as_text else "application/json"
    # Bug fix: ``ensure_ascii`` was accepted but json.dumps was always called
    # with ensure_ascii=False, silently ignoring the caller's choice.
    plain = json.dumps(data, ensure_ascii=ensure_ascii)
    if request:
        fmt = request.GET.get('fmt', 'json')
    if fmt == "jsonp":
        jsonp_cb = request.GET.get('callback', 'callback')
        # content_type = "application/javascript"
        plain = "%s(%s);" % (jsonp_cb, plain)
    return HttpResponse(plain, content_type=content_type)
def has_any_perm(user, perms):
    """Return True if ``user`` holds at least one of ``perms``.

    :param user: object exposing ``has_perm(perm)``
    :param perms: list/tuple of permission names, or a single
        comma-separated string such as ``"app.add_foo,app.change_foo"``
    """
    # Bug fix: isinstance() requires a type or *tuple* of types as its second
    # argument; the original passed a list ([list, tuple]), which raised
    # TypeError on every call.
    if not isinstance(perms, (list, tuple)):
        perms = [e.strip() for e in perms.split(',') if e.strip()]
    for p in perms:
        if user.has_perm(p):
            return True
    return False
def get_post_data(request):
    """Return ``request.POST`` for POST requests, otherwise ``None``."""
    return request.POST if request.method == 'POST' else None
def get_client_ip(request):
    """Best-effort client IP: first X-Forwarded-For hop, else REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
def create_formset(
        formset_class, prefix,
        template_prefix=None, **kwargs):
    """Instantiate ``formset_class`` with a crispy-forms table helper attached.

    :param formset_class: the formset class to instantiate
    :param prefix: form prefix passed to the formset
    :param template_prefix: optional directory prepended to the default
        crispy template path
    :param kwargs: forwarded to the formset constructor
    """
    # Cleanup: removed the unused local ``project_name``.
    helper = FormHelper()
    template = 'bootstrap3/table_inline_formset.html'
    if template_prefix:
        template = "%s/%s" % (template_prefix, template)
    helper.template = template
    formset = formset_class(prefix=prefix, **kwargs)
    formset.helper = helper
    return formset
def forms_is_valid(forms):
    """Return True only if every form in ``forms`` validates.

    Every form is validated (no short-circuit) so each one's errors are
    fully populated for display.
    """
    results = [form.is_valid() for form in forms]
    return all(results)
|
vicalloy/lbutils | lbutils/views.py | request_get_next | python | def request_get_next(request, default_next):
next_url = request.POST.get('next')\
or request.GET.get('next')\
or request.META.get('HTTP_REFERER')\
or default_next
return next_url | get next url form request
order: POST.next GET.next HTTP_REFERER, default_next | train | https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/views.py#L28-L38 | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.http import HttpResponse
try:
from crispy_forms.helper import FormHelper
except ImportError:
pass
__all__ = (
'qdict_get_list', 'request_get_next', 'save_formset',
'render_json', 'has_any_perm', 'get_post_data',
'get_client_ip', 'create_formset', 'forms_is_valid',
)
def qdict_get_list(qdict, k):
    """Return ``qdict.getlist(k)`` with blank entries dropped."""
    return [item for item in qdict.getlist(k) if item]
def save_formset(formset, ext_vals):
formset.save(commit=False)
for f in formset.saved_forms:
o = f.save(commit=False)
for k, v in ext_vals.items():
setattr(o, k, v)
o.save()
def render_json(data, ensure_ascii=False, request=None, as_text=False):
fmt = 'json'
content_type = "application/json"
if as_text:
content_type = "text/html"
plain = json.dumps(data, ensure_ascii=False)
if request:
fmt = request.GET.get('fmt', 'json')
if fmt == "jsonp":
jsonp_cb = request.GET.get('callback', 'callback')
# content_type = "application/javascript"
plain = "%s(%s);" % (jsonp_cb, plain)
return HttpResponse(plain, content_type=content_type)
def has_any_perm(user, perms):
if not isinstance(perms, [list, tuple]):
perms = [e.strip() for e in perms.split(',') if e.strip()]
for p in perms:
if user.has_perm(p):
return True
return False
def get_post_data(request):
""" get request.POST, if POST is empty return None """
if request.method == 'POST':
return request.POST
return None
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def create_formset(
formset_class, prefix,
template_prefix=None, **kwargs):
helper = FormHelper()
project_name = ''
template = 'bootstrap3/table_inline_formset.html'
if template_prefix:
template = "%s/%s" % (template_prefix, template)
helper.template = template
formset = formset_class(prefix=prefix, **kwargs)
formset.helper = helper
return formset
def forms_is_valid(forms):
return all([form.is_valid() for form in forms])
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _validate_example | python | def _validate_example(rh, method, example_type):
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True) | Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L18-L39 | null | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _get_rh_methods(rh):
    """Yield ``(name, method)`` for every schema-validated HTTP method on ``rh``."""
    for attr_name, attr in vars(rh).items():
        is_http = attr_name in HTTP_METHODS
        if is_http and is_method(attr) and hasattr(attr, "input_schema"):
            yield (attr_name, attr)
def _get_tuple_from_route(route):
    """Return (pattern, handler_class, methods) tuple from ``route``

    :type route: tuple|tornado.web.URLSpec
    :rtype: tuple
    :raises TypeError: If ``route`` is not a tuple or URLSpec
    """
    if isinstance(route, tuple):
        assert len(route) >= 2
        pattern, handler_class = route[:2]
    elif isinstance(route, tornado.web.URLSpec):
        pattern, handler_class = route.regex.pattern, route.handler_class
    else:
        raise TypeError("Unknown route type '{}'"
                        .format(type(route).__name__))
    methods = []
    # A handler method belongs to this route only when its parameter names
    # exactly match the URL pattern's named capture groups (plus ``self``).
    route_re = re.compile(pattern)
    route_params = set(list(route_re.groupindex.keys()) + ['self'])
    for http_method in HTTP_METHODS:
        method = getattr(handler_class, http_method, None)
        if method:
            # Unwrap schema.validate decorators so the original signature is inspected.
            method = extract_method(method)
            # NOTE(review): inspect.getargspec was removed in Python 3.11; this
            # fallback only runs when __argspec_args is absent -- confirm the
            # supported Python versions before relying on it.
            method_params = set(getattr(method, "__argspec_args",
                                        inspect.getargspec(method).args))
            if route_params.issubset(method_params) and \
                    method_params.issubset(route_params):
                methods.append(http_method)
    return pattern, handler_class, methods
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
    """Get and write API documentation for ``routes`` to file"""
    _write_docs_to_file(get_api_docs(routes))
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _get_rh_methods | python | def _get_rh_methods(rh):
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v) | Yield all HTTP methods in ``rh`` that are decorated
with schema.validate | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L42-L51 | null | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _get_tuple_from_route | python | def _get_tuple_from_route(route):
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods | Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L54-L83 | [
"def extract_method(wrapped_method):\n \"\"\"Gets original method if wrapped_method was decorated\n\n :rtype: any([types.FunctionType, types.MethodType])\n \"\"\"\n # If method was decorated with validate, the original method\n # is available as orig_func thanks to our container decorator\n return wrapped_method.orig_func if \\\n hasattr(wrapped_method, \"orig_func\") else wrapped_method\n"
] | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _escape_markdown_literals | python | def _escape_markdown_literals(string):
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string)) | Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L86-L94 | null | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _cleandoc | python | def _cleandoc(doc):
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines) | Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc`` | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L97-L108 | null | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | _add_indent | python | def _add_indent(string, indent):
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines) | Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L111-L121 | null | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def get_api_docs(routes):
"""
Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation
"""
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/api_doc_gen.py | get_api_docs | python | def get_api_docs(routes):
routes = map(_get_tuple_from_route, routes)
documentation = []
for url, rh, methods in sorted(routes, key=lambda a: a[0]):
if issubclass(rh, APIHandler):
documentation.append(_get_route_doc(url, rh, methods))
documentation = (
"**This documentation is automatically generated.**\n\n" +
"**Output schemas only represent `data` and not the full output; " +
"see output examples and the JSend specification.**\n" +
"\n<br>\n<br>\n".join(documentation)
)
return documentation | Generates GitHub Markdown formatted API documentation using
provided schemas in RequestHandler methods and their docstrings.
:type routes: [(url, RequestHandler), ...]
:param routes: List of routes (this is ideally all possible routes of the
app)
:rtype: str
:returns: generated GFM-formatted documentation | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/api_doc_gen.py#L237-L260 | [
"def _get_route_doc(url, rh, methods):\n route_doc = \"\"\"\n # {route_pattern}\n\n {content_type}\n\n {rh_doc}\n \"\"\".format(\n route_pattern=_escape_markdown_literals(url),\n content_type=_get_content_type(rh),\n rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)\n )\n return _cleandoc(route_doc)\n"
] | import json
import inspect
import re
try:
from itertools import imap as map # PY2
except ImportError:
pass
import tornado.web
from jsonschema import ValidationError, validate
from tornado_json.utils import extract_method, is_method
from tornado_json.constants import HTTP_METHODS
from tornado_json.requesthandlers import APIHandler
def _validate_example(rh, method, example_type):
"""Validates example against schema
:returns: Formatted example if example exists and validates, otherwise None
:raises ValidationError: If example does not validate against the schema
"""
example = getattr(method, example_type + "_example")
schema = getattr(method, example_type + "_schema")
if example is None:
return None
try:
validate(example, schema)
except ValidationError as e:
raise ValidationError(
"{}_example for {}.{} could not be validated.\n{}".format(
example_type, rh.__name__, method.__name__, str(e)
)
)
return json.dumps(example, indent=4, sort_keys=True)
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
def _get_tuple_from_route(route):
"""Return (pattern, handler_class, methods) tuple from ``route``
:type route: tuple|tornado.web.URLSpec
:rtype: tuple
:raises TypeError: If ``route`` is not a tuple or URLSpec
"""
if isinstance(route, tuple):
assert len(route) >= 2
pattern, handler_class = route[:2]
elif isinstance(route, tornado.web.URLSpec):
pattern, handler_class = route.regex.pattern, route.handler_class
else:
raise TypeError("Unknown route type '{}'"
.format(type(route).__name__))
methods = []
route_re = re.compile(pattern)
route_params = set(list(route_re.groupindex.keys()) + ['self'])
for http_method in HTTP_METHODS:
method = getattr(handler_class, http_method, None)
if method:
method = extract_method(method)
method_params = set(getattr(method, "__argspec_args",
inspect.getargspec(method).args))
if route_params.issubset(method_params) and \
method_params.issubset(route_params):
methods.append(http_method)
return pattern, handler_class, methods
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\
:type string: str
:rtype: str
"""
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty
:returns: Cleaned ``doc``
"""
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
def _add_indent(string, indent):
"""Add indent of ``indent`` spaces to ``string.split("\n")[1:]``
Useful for formatting in strings to already indented blocks
"""
lines = string.split("\n")
first, lines = lines[0], lines[1:]
lines = ["{indent}{s}".format(indent=" " * indent, s=s)
for s in lines]
lines = [first] + lines
return "\n".join(lines)
def _get_example_doc(rh, method, type):
assert type in ("input", "output")
example = _validate_example(rh, method, type)
if not example:
return ""
res = """
**{type} Example**
```json
{example}
```
""".format(
type=type.capitalize(),
example=_add_indent(example, 4)
)
return _cleandoc(res)
def _get_input_example(rh, method):
return _get_example_doc(rh, method, "input")
def _get_output_example(rh, method):
return _get_example_doc(rh, method, "output")
def _get_schema_doc(schema, type):
res = """
**{type} Schema**
```json
{schema}
```
""".format(
schema=_add_indent(json.dumps(schema, indent=4, sort_keys=True), 4),
type=type.capitalize()
)
return _cleandoc(res)
def _get_input_schema_doc(method):
return _get_schema_doc(method.input_schema, "input")
def _get_output_schema_doc(method):
return _get_schema_doc(method.output_schema, "output")
def _get_notes(method):
doc = inspect.getdoc(method)
if doc is None:
return None
res = """
**Notes**
{}
""".format(_add_indent(doc, 4))
return _cleandoc(res)
def _get_method_doc(rh, method_name, method):
res = """## {method_name}
{input_schema}
{input_example}
{output_schema}
{output_example}
{notes}
""".format(
method_name=method_name.upper(),
input_schema=_get_input_schema_doc(method),
output_schema=_get_output_schema_doc(method),
notes=_get_notes(method) or "",
input_example=_get_input_example(rh, method),
output_example=_get_output_example(rh, method),
)
return _cleandoc("\n".join([l.rstrip() for l in res.splitlines()]))
def _get_rh_doc(rh, methods):
res = "\n\n".join([_get_method_doc(rh, method_name, method)
for method_name, method in _get_rh_methods(rh)
if method_name in methods])
return res
def _get_content_type(rh):
# XXX: Content-type is hard-coded but ideally should be retrieved;
# the hard part is, we don't know what it is without initializing
# an instance, so just leave as-is for now
return "Content-Type: application/json"
def _get_route_doc(url, rh, methods):
route_doc = """
# {route_pattern}
{content_type}
{rh_doc}
""".format(
route_pattern=_escape_markdown_literals(url),
content_type=_get_content_type(rh),
rh_doc=_add_indent(_get_rh_doc(rh, methods), 4)
)
return _cleandoc(route_doc)
def _write_docs_to_file(documentation):
# Documentation is written to the root folder
with open("API_Documentation.md", "w+") as f:
f.write(documentation)
def api_doc_gen(routes):
"""Get and write API documentation for ``routes`` to file"""
documentation = get_api_docs(routes)
_write_docs_to_file(documentation)
|
hfaran/Tornado-JSON | tornado_json/jsend.py | JSendMixin.error | python | def error(self, message, data=None, code=None):
result = {'status': 'error', 'message': message}
if data:
result['data'] = data
if code:
result['code'] = code
self.write(result)
self.finish() | An error occurred in processing the request, i.e. an exception was
thrown.
:type data: A JSON-serializable object
:param data: A generic container for any other information about the
error, i.e. the conditions that caused the error,
stack traces, etc.
:type message: A JSON-serializable object
:param message: A meaningful, end-user-readable (or at the least
log-worthy) message, explaining what went wrong
:type code: int
:param code: A numeric code corresponding to the error, if applicable | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/jsend.py#L35-L55 | null | class JSendMixin(object):
"""http://labs.omniti.com/labs/jsend
JSend is a specification that lays down some rules for how JSON
responses from web servers should be formatted.
JSend focuses on application-level (as opposed to protocol- or
transport-level) messaging which makes it ideal for use in
REST-style applications and APIs.
"""
def success(self, data):
"""When an API call is successful, the JSend object is used as a simple
envelope for the results, using the data key.
:type data: A JSON-serializable object
:param data: Acts as the wrapper for any data returned by the API
call. If the call returns no data, data should be set to null.
"""
self.write({'status': 'success', 'data': data})
self.finish()
def fail(self, data):
"""There was a problem with the data submitted, or some pre-condition
of the API call wasn't satisfied.
:type data: A JSON-serializable object
:param data: Provides the wrapper for the details of why the request
failed. If the reasons for failure correspond to POST values,
the response object's keys SHOULD correspond to those POST values.
"""
self.write({'status': 'fail', 'data': data})
self.finish()
|
hfaran/Tornado-JSON | tornado_json/schema.py | get_object_defaults | python | def get_object_defaults(object_schema):
default = {}
for k, schema in object_schema.get('properties', {}).items():
if schema.get('type') == 'object':
if 'default' in schema:
default[k] = schema['default']
try:
object_defaults = get_object_defaults(schema)
except NoObjectDefaults:
if 'default' not in schema:
raise NoObjectDefaults
else:
if 'default' not in schema:
default[k] = {}
default[k].update(object_defaults)
else:
if 'default' in schema:
default[k] = schema['default']
if default:
return default
raise NoObjectDefaults | Extracts default values dict (nested) from an type object schema.
:param object_schema: Schema type object
:type object_schema: dict
:returns: Nested dict with defaults values | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/schema.py#L26-L58 | [
"def get_object_defaults(object_schema):\n \"\"\"\n Extracts default values dict (nested) from an type object schema.\n\n :param object_schema: Schema type object\n :type object_schema: dict\n :returns: Nested dict with defaults values\n \"\"\"\n default = {}\n for k, schema in object_schema.get('properties', {}).items():\n\n if schema.get('type') == 'object':\n if 'default' in schema:\n default[k] = schema['default']\n\n try:\n object_defaults = get_object_defaults(schema)\n except NoObjectDefaults:\n if 'default' not in schema:\n raise NoObjectDefaults\n else:\n if 'default' not in schema:\n default[k] = {}\n\n default[k].update(object_defaults)\n else:\n if 'default' in schema:\n default[k] = schema['default']\n\n if default:\n return default\n\n raise NoObjectDefaults\n"
] | import json
from functools import wraps
import jsonschema
import tornado.gen
from tornado_json.exceptions import APIError
try:
from tornado.concurrent import is_future
except ImportError:
# For tornado 3.x.x
from tornado.concurrent import Future
is_future = lambda x: isinstance(x, Future)
from tornado_json.utils import container, deep_update
class NoObjectDefaults(Exception):
""" Raised when a schema type object ({"type": "object"}) has no "default"
key and one of their properties also don't have a "default" key.
"""
def input_schema_clean(input_, input_schema):
"""
Updates schema default values with input data.
:param input_: Input data
:type input_: dict
:param input_schema: Input schema
:type input_schema: dict
:returns: Nested dict with data (defaul values updated with input data)
:rtype: dict
"""
if input_schema.get('type') == 'object':
try:
defaults = get_object_defaults(input_schema)
except NoObjectDefaults:
pass
else:
return deep_update(defaults, input_)
return input_
def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
"""Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (If schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contains 'published' key with value False if no one
comes from request, also works with nested schemas.
"""
@container
def _validate(rh_method):
"""Decorator for RequestHandler schema validation
This decorator:
- Validates request body against input schema of the method
- Calls the ``rh_method`` and gets output from it
- Validates output against output schema of the method
- Calls ``JSendMixin.success`` to write the validated output
:type rh_method: function
:param rh_method: The RequestHandler method to be decorated
:returns: The decorated method
:raises ValidationError: If input is invalid as per the schema
or malformed
:raises TypeError: If the output is invalid as per the schema
or malformed
:raises APIError: If the output is a falsy value and
on_empty_404 is True, an HTTP 404 error is returned
"""
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
# In case the specified input_schema is ``None``, we
# don't json.loads the input, but just set it to ``None``
# instead.
if input_schema is not None:
# Attempt to json.loads the input
try:
# TODO: Assuming UTF-8 encoding for all requests,
# find a nice way of determining this from charset
# in headers if provided
encoding = "UTF-8"
input_ = json.loads(self.request.body.decode(encoding))
except ValueError as e:
raise jsonschema.ValidationError(
"Input is malformed; could not decode JSON object."
)
if use_defaults:
input_ = input_schema_clean(input_, input_schema)
# Validate the received input
jsonschema.validate(
input_,
input_schema,
cls=validator_cls,
format_checker=format_checker
)
else:
input_ = None
# A json.loads'd version of self.request["body"] is now available
# as self.body
setattr(self, "body", input_)
# Call the requesthandler method
output = rh_method(self, *args, **kwargs)
# If the rh_method returned a Future a la `raise Return(value)`
# we grab the output.
if is_future(output):
output = yield output
# if output is empty, auto return the error 404.
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
# We wrap output in an object before validating in case
# output is a string (and ergo not a validatable JSON object)
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
# We essentially re-raise this as a TypeError because
# we don't want this error data passed back to the client
# because it's a fault on our end. The client should
# only see a 500 - Internal Server Error.
raise TypeError(str(e))
# If no ValidationError has been raised up until here, we write
# back output
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate
|
hfaran/Tornado-JSON | tornado_json/schema.py | input_schema_clean | python | def input_schema_clean(input_, input_schema):
if input_schema.get('type') == 'object':
try:
defaults = get_object_defaults(input_schema)
except NoObjectDefaults:
pass
else:
return deep_update(defaults, input_)
return input_ | Updates schema default values with input data.
:param input_: Input data
:type input_: dict
:param input_schema: Input schema
:type input_schema: dict
:returns: Nested dict with data (defaul values updated with input data)
:rtype: dict | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/schema.py#L61-L79 | [
"def deep_update(source, overrides):\n \"\"\"Update a nested dictionary or similar mapping.\n\n Modify ``source`` in place.\n\n :type source: collections.Mapping\n :type overrides: collections.Mapping\n :rtype: collections.Mapping\n \"\"\"\n for key, value in overrides.items():\n if isinstance(value, collections.Mapping) and value:\n returned = deep_update(source.get(key, {}), value)\n source[key] = returned\n else:\n source[key] = overrides[key]\n return source\n",
"def get_object_defaults(object_schema):\n \"\"\"\n Extracts default values dict (nested) from an type object schema.\n\n :param object_schema: Schema type object\n :type object_schema: dict\n :returns: Nested dict with defaults values\n \"\"\"\n default = {}\n for k, schema in object_schema.get('properties', {}).items():\n\n if schema.get('type') == 'object':\n if 'default' in schema:\n default[k] = schema['default']\n\n try:\n object_defaults = get_object_defaults(schema)\n except NoObjectDefaults:\n if 'default' not in schema:\n raise NoObjectDefaults\n else:\n if 'default' not in schema:\n default[k] = {}\n\n default[k].update(object_defaults)\n else:\n if 'default' in schema:\n default[k] = schema['default']\n\n if default:\n return default\n\n raise NoObjectDefaults\n"
] | import json
from functools import wraps
import jsonschema
import tornado.gen
from tornado_json.exceptions import APIError
try:
from tornado.concurrent import is_future
except ImportError:
# For tornado 3.x.x
from tornado.concurrent import Future
is_future = lambda x: isinstance(x, Future)
from tornado_json.utils import container, deep_update
class NoObjectDefaults(Exception):
""" Raised when a schema type object ({"type": "object"}) has no "default"
key and one of their properties also don't have a "default" key.
"""
def get_object_defaults(object_schema):
"""
Extracts default values dict (nested) from an type object schema.
:param object_schema: Schema type object
:type object_schema: dict
:returns: Nested dict with defaults values
"""
default = {}
for k, schema in object_schema.get('properties', {}).items():
if schema.get('type') == 'object':
if 'default' in schema:
default[k] = schema['default']
try:
object_defaults = get_object_defaults(schema)
except NoObjectDefaults:
if 'default' not in schema:
raise NoObjectDefaults
else:
if 'default' not in schema:
default[k] = {}
default[k].update(object_defaults)
else:
if 'default' in schema:
default[k] = schema['default']
if default:
return default
raise NoObjectDefaults
def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
"""Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (If schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contains 'published' key with value False if no one
comes from request, also works with nested schemas.
"""
@container
def _validate(rh_method):
"""Decorator for RequestHandler schema validation
This decorator:
- Validates request body against input schema of the method
- Calls the ``rh_method`` and gets output from it
- Validates output against output schema of the method
- Calls ``JSendMixin.success`` to write the validated output
:type rh_method: function
:param rh_method: The RequestHandler method to be decorated
:returns: The decorated method
:raises ValidationError: If input is invalid as per the schema
or malformed
:raises TypeError: If the output is invalid as per the schema
or malformed
:raises APIError: If the output is a falsy value and
on_empty_404 is True, an HTTP 404 error is returned
"""
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
# In case the specified input_schema is ``None``, we
# don't json.loads the input, but just set it to ``None``
# instead.
if input_schema is not None:
# Attempt to json.loads the input
try:
# TODO: Assuming UTF-8 encoding for all requests,
# find a nice way of determining this from charset
# in headers if provided
encoding = "UTF-8"
input_ = json.loads(self.request.body.decode(encoding))
except ValueError as e:
raise jsonschema.ValidationError(
"Input is malformed; could not decode JSON object."
)
if use_defaults:
input_ = input_schema_clean(input_, input_schema)
# Validate the received input
jsonschema.validate(
input_,
input_schema,
cls=validator_cls,
format_checker=format_checker
)
else:
input_ = None
# A json.loads'd version of self.request["body"] is now available
# as self.body
setattr(self, "body", input_)
# Call the requesthandler method
output = rh_method(self, *args, **kwargs)
# If the rh_method returned a Future a la `raise Return(value)`
# we grab the output.
if is_future(output):
output = yield output
# if output is empty, auto return the error 404.
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
# We wrap output in an object before validating in case
# output is a string (and ergo not a validatable JSON object)
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
# We essentially re-raise this as a TypeError because
# we don't want this error data passed back to the client
# because it's a fault on our end. The client should
# only see a 500 - Internal Server Error.
raise TypeError(str(e))
# If no ValidationError has been raised up until here, we write
# back output
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate
|
hfaran/Tornado-JSON | tornado_json/schema.py | validate | python | def validate(input_schema=None, output_schema=None,
input_example=None, output_example=None,
validator_cls=None,
format_checker=None, on_empty_404=False,
use_defaults=False):
@container
def _validate(rh_method):
"""Decorator for RequestHandler schema validation
This decorator:
- Validates request body against input schema of the method
- Calls the ``rh_method`` and gets output from it
- Validates output against output schema of the method
- Calls ``JSendMixin.success`` to write the validated output
:type rh_method: function
:param rh_method: The RequestHandler method to be decorated
:returns: The decorated method
:raises ValidationError: If input is invalid as per the schema
or malformed
:raises TypeError: If the output is invalid as per the schema
or malformed
:raises APIError: If the output is a falsy value and
on_empty_404 is True, an HTTP 404 error is returned
"""
@wraps(rh_method)
@tornado.gen.coroutine
def _wrapper(self, *args, **kwargs):
# In case the specified input_schema is ``None``, we
# don't json.loads the input, but just set it to ``None``
# instead.
if input_schema is not None:
# Attempt to json.loads the input
try:
# TODO: Assuming UTF-8 encoding for all requests,
# find a nice way of determining this from charset
# in headers if provided
encoding = "UTF-8"
input_ = json.loads(self.request.body.decode(encoding))
except ValueError as e:
raise jsonschema.ValidationError(
"Input is malformed; could not decode JSON object."
)
if use_defaults:
input_ = input_schema_clean(input_, input_schema)
# Validate the received input
jsonschema.validate(
input_,
input_schema,
cls=validator_cls,
format_checker=format_checker
)
else:
input_ = None
# A json.loads'd version of self.request["body"] is now available
# as self.body
setattr(self, "body", input_)
# Call the requesthandler method
output = rh_method(self, *args, **kwargs)
# If the rh_method returned a Future a la `raise Return(value)`
# we grab the output.
if is_future(output):
output = yield output
# if output is empty, auto return the error 404.
if not output and on_empty_404:
raise APIError(404, "Resource not found.")
if output_schema is not None:
# We wrap output in an object before validating in case
# output is a string (and ergo not a validatable JSON object)
try:
jsonschema.validate(
{"result": output},
{
"type": "object",
"properties": {
"result": output_schema
},
"required": ["result"]
}
)
except jsonschema.ValidationError as e:
# We essentially re-raise this as a TypeError because
# we don't want this error data passed back to the client
# because it's a fault on our end. The client should
# only see a 500 - Internal Server Error.
raise TypeError(str(e))
# If no ValidationError has been raised up until here, we write
# back output
self.success(output)
setattr(_wrapper, "input_schema", input_schema)
setattr(_wrapper, "output_schema", output_schema)
setattr(_wrapper, "input_example", input_example)
setattr(_wrapper, "output_example", output_example)
return _wrapper
return _validate | Parameterized decorator for schema validation
:type validator_cls: IValidator class
:type format_checker: jsonschema.FormatChecker or None
:type on_empty_404: bool
:param on_empty_404: If this is set, and the result from the
decorated method is a falsy value, a 404 will be raised.
:type use_defaults: bool
:param use_defaults: If this is set, will put 'default' keys
from schema to self.body (If schema type is object). Example:
{
'published': {'type': 'bool', 'default': False}
}
self.body will contains 'published' key with value False if no one
comes from request, also works with nested schemas. | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/schema.py#L82-L201 | [
"def container(dec):\n \"\"\"Meta-decorator (for decorating decorators)\n\n Keeps around original decorated function as a property ``orig_func``\n\n :param dec: Decorator to decorate\n :type dec: function\n :returns: Decorated decorator\n \"\"\"\n # Credits: http://stackoverflow.com/a/1167248/1798683\n @wraps(dec)\n def meta_decorator(f):\n decorator = dec(f)\n decorator.orig_func = f\n return decorator\n return meta_decorator\n"
] | import json
from functools import wraps
import jsonschema
import tornado.gen
from tornado_json.exceptions import APIError
try:
from tornado.concurrent import is_future
except ImportError:
# For tornado 3.x.x
from tornado.concurrent import Future
is_future = lambda x: isinstance(x, Future)
from tornado_json.utils import container, deep_update
class NoObjectDefaults(Exception):
""" Raised when a schema type object ({"type": "object"}) has no "default"
key and one of their properties also don't have a "default" key.
"""
def get_object_defaults(object_schema):
"""
Extracts default values dict (nested) from an type object schema.
:param object_schema: Schema type object
:type object_schema: dict
:returns: Nested dict with defaults values
"""
default = {}
for k, schema in object_schema.get('properties', {}).items():
if schema.get('type') == 'object':
if 'default' in schema:
default[k] = schema['default']
try:
object_defaults = get_object_defaults(schema)
except NoObjectDefaults:
if 'default' not in schema:
raise NoObjectDefaults
else:
if 'default' not in schema:
default[k] = {}
default[k].update(object_defaults)
else:
if 'default' in schema:
default[k] = schema['default']
if default:
return default
raise NoObjectDefaults
def input_schema_clean(input_, input_schema):
"""
Updates schema default values with input data.
:param input_: Input data
:type input_: dict
:param input_schema: Input schema
:type input_schema: dict
:returns: Nested dict with data (defaul values updated with input data)
:rtype: dict
"""
if input_schema.get('type') == 'object':
try:
defaults = get_object_defaults(input_schema)
except NoObjectDefaults:
pass
else:
return deep_update(defaults, input_)
return input_
|
hfaran/Tornado-JSON | setup.py | read | python | def read(filename):
return codecs.open(os.path.join(__DIR__, filename), 'r').read() | Read and return `filename` in root dir of project and return string | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/setup.py#L10-L12 | null | import os
import sys
__DIR__ = os.path.abspath(os.path.dirname(__file__))
import codecs
from setuptools import setup
from setuptools.command.test import test as TestCommand
import tornado_json
install_requires = read("requirements.txt").split()
long_description = read('README.md')
class Pytest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--verbose']
self.test_suite = True
def run_tests(self):
# Using pytest rather than tox because Travis-CI has issues with tox
# Import here, cause outside the eggs aren't loaded
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(
name="Tornado-JSON",
version=tornado_json.__version__,
url='https://github.com/hfaran/Tornado-JSON',
license='MIT License',
author='Hamza Faran',
description=('A simple JSON API framework based on Tornado'),
long_description=long_description,
packages=['tornado_json'],
install_requires = install_requires,
tests_require=['pytest'],
cmdclass = {'test': Pytest},
data_files=[
# Populate this with any files config files etc.
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Application Frameworks",
]
)
|
hfaran/Tornado-JSON | tornado_json/utils.py | deep_update | python | def deep_update(source, overrides):
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source | Update a nested dictionary or similar mapping.
Modify ``source`` in place.
:type source: collections.Mapping
:type overrides: collections.Mapping
:rtype: collections.Mapping | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/utils.py#L7-L22 | null | import collections
import inspect
import types
from functools import wraps
def container(dec):
"""Meta-decorator (for decorating decorators)
Keeps around original decorated function as a property ``orig_func``
:param dec: Decorator to decorate
:type dec: function
:returns: Decorated decorator
"""
# Credits: http://stackoverflow.com/a/1167248/1798683
@wraps(dec)
def meta_decorator(f):
decorator = dec(f)
decorator.orig_func = f
return decorator
return meta_decorator
def extract_method(wrapped_method):
"""Gets original method if wrapped_method was decorated
:rtype: any([types.FunctionType, types.MethodType])
"""
# If method was decorated with validate, the original method
# is available as orig_func thanks to our container decorator
return wrapped_method.orig_func if \
hasattr(wrapped_method, "orig_func") else wrapped_method
def is_method(method):
method = extract_method(method)
# Can be either a method or a function
return type(method) in [types.MethodType, types.FunctionType]
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
"""Determines if ``cls`` is indeed a subclass of ``classnames``"""
if isinstance(cls, list):
return any(is_handler_subclass(c) for c in cls)
elif isinstance(cls, type):
return any(c.__name__ in classnames for c in inspect.getmro(cls))
else:
raise TypeError(
"Unexpected type `{}` for class `{}`".format(
type(cls),
cls
)
)
|
hfaran/Tornado-JSON | tornado_json/utils.py | container | python | def container(dec):
# Credits: http://stackoverflow.com/a/1167248/1798683
@wraps(dec)
def meta_decorator(f):
decorator = dec(f)
decorator.orig_func = f
return decorator
return meta_decorator | Meta-decorator (for decorating decorators)
Keeps around original decorated function as a property ``orig_func``
:param dec: Decorator to decorate
:type dec: function
:returns: Decorated decorator | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/utils.py#L25-L40 | null | import collections
import inspect
import types
from functools import wraps
def deep_update(source, overrides):
"""Update a nested dictionary or similar mapping.
Modify ``source`` in place.
:type source: collections.Mapping
:type overrides: collections.Mapping
:rtype: collections.Mapping
"""
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source
def extract_method(wrapped_method):
"""Gets original method if wrapped_method was decorated
:rtype: any([types.FunctionType, types.MethodType])
"""
# If method was decorated with validate, the original method
# is available as orig_func thanks to our container decorator
return wrapped_method.orig_func if \
hasattr(wrapped_method, "orig_func") else wrapped_method
def is_method(method):
method = extract_method(method)
# Can be either a method or a function
return type(method) in [types.MethodType, types.FunctionType]
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
"""Determines if ``cls`` is indeed a subclass of ``classnames``"""
if isinstance(cls, list):
return any(is_handler_subclass(c) for c in cls)
elif isinstance(cls, type):
return any(c.__name__ in classnames for c in inspect.getmro(cls))
else:
raise TypeError(
"Unexpected type `{}` for class `{}`".format(
type(cls),
cls
)
)
|
hfaran/Tornado-JSON | tornado_json/utils.py | is_handler_subclass | python | def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
if isinstance(cls, list):
return any(is_handler_subclass(c) for c in cls)
elif isinstance(cls, type):
return any(c.__name__ in classnames for c in inspect.getmro(cls))
else:
raise TypeError(
"Unexpected type `{}` for class `{}`".format(
type(cls),
cls
)
) | Determines if ``cls`` is indeed a subclass of ``classnames`` | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/utils.py#L60-L72 | null | import collections
import inspect
import types
from functools import wraps
def deep_update(source, overrides):
"""Update a nested dictionary or similar mapping.
Modify ``source`` in place.
:type source: collections.Mapping
:type overrides: collections.Mapping
:rtype: collections.Mapping
"""
for key, value in overrides.items():
if isinstance(value, collections.Mapping) and value:
returned = deep_update(source.get(key, {}), value)
source[key] = returned
else:
source[key] = overrides[key]
return source
def container(dec):
"""Meta-decorator (for decorating decorators)
Keeps around original decorated function as a property ``orig_func``
:param dec: Decorator to decorate
:type dec: function
:returns: Decorated decorator
"""
# Credits: http://stackoverflow.com/a/1167248/1798683
@wraps(dec)
def meta_decorator(f):
decorator = dec(f)
decorator.orig_func = f
return decorator
return meta_decorator
def extract_method(wrapped_method):
"""Gets original method if wrapped_method was decorated
:rtype: any([types.FunctionType, types.MethodType])
"""
# If method was decorated with validate, the original method
# is available as orig_func thanks to our container decorator
return wrapped_method.orig_func if \
hasattr(wrapped_method, "orig_func") else wrapped_method
def is_method(method):
method = extract_method(method)
# Can be either a method or a function
return type(method) in [types.MethodType, types.FunctionType]
|
hfaran/Tornado-JSON | tornado_json/requesthandlers.py | APIHandler.write_error | python | def write_error(self, status_code, **kwargs):
def get_exc_message(exception):
return exception.log_message if \
hasattr(exception, "log_message") else str(exception)
self.clear()
self.set_status(status_code)
# Any APIError exceptions raised will result in a JSend fail written
# back with the log_message as data. Hence, log_message should NEVER
# expose internals. Since log_message is proprietary to HTTPError
# class exceptions, all exceptions without it will return their
# __str__ representation.
# All other exceptions result in a JSend error being written back,
# with log_message only written if debug mode is enabled
exception = kwargs["exc_info"][1]
if any(isinstance(exception, c) for c in [APIError, ValidationError]):
# ValidationError is always due to a malformed request
if isinstance(exception, ValidationError):
self.set_status(400)
self.fail(get_exc_message(exception))
else:
self.error(
message=self._reason,
data=get_exc_message(exception) if self.settings.get("debug")
else None,
code=status_code
) | Override of RequestHandler.write_error
Calls ``error()`` or ``fail()`` from JSendMixin depending on which
exception was raised with provided reason and status code.
:type status_code: int
:param status_code: HTTP status code | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/requesthandlers.py#L50-L85 | [
"def get_exc_message(exception):\n return exception.log_message if \\\n hasattr(exception, \"log_message\") else str(exception)\n"
] | class APIHandler(BaseHandler, JSendMixin):
"""RequestHandler for API calls
- Sets header as ``application/json``
- Provides custom write_error that writes error back as JSON \
rather than as the standard HTML template
"""
def initialize(self):
"""
- Set Content-type for JSON
"""
self.set_header("Content-Type", "application/json")
|
hfaran/Tornado-JSON | tornado_json/routes.py | gen_submodule_names | python | def gen_submodule_names(package):
for importer, modname, ispkg in pkgutil.walk_packages(
path=package.__path__,
prefix=package.__name__ + '.',
onerror=lambda x: None):
yield modname | Walk package and yield names of all submodules
:type package: package
:param package: The package to get submodule names of
:returns: Iterator that yields names of all submodules of ``package``
:rtype: Iterator that yields ``str`` | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/routes.py#L28-L40 | null | import pkgutil
import importlib
import inspect
from itertools import chain
from functools import reduce
from tornado_json.constants import HTTP_METHODS
from tornado_json.utils import extract_method, is_method, is_handler_subclass
def get_routes(package):
"""
This will walk ``package`` and generates routes from any and all
``APIHandler`` and ``ViewHandler`` subclasses it finds. If you need to
customize or remove any routes, you can do so to the list of
returned routes that this generates.
:type package: package
:param package: The package containing RequestHandlers to generate
routes from
:returns: List of routes for all submodules of ``package``
:rtype: [(url, RequestHandler), ... ]
"""
return list(chain(*[get_module_routes(modname) for modname in
gen_submodule_names(package)]))
def get_module_routes(module_name, custom_routes=None, exclusions=None,
arg_pattern=r'(?P<{}>[a-zA-Z0-9_\-]+)'):
"""Create and return routes for module_name
Routes are (url, RequestHandler) tuples
:returns: list of routes for ``module_name`` with respect to ``exclusions``
and ``custom_routes``. Returned routes are with URLs formatted such
that they are forward-slash-separated by module/class level
and end with the lowercase name of the RequestHandler (it will also
remove 'handler' from the end of the name of the handler).
For example, a requesthandler with the name
``helloworld.api.HelloWorldHandler`` would be assigned the url
``/api/helloworld``.
Additionally, if a method has extra arguments aside from ``self`` in
its signature, routes with URL patterns will be generated to
match ``r"(?P<{}>[a-zA-Z0-9_\-]+)".format(argname)`` for each
argument. The aforementioned regex will match ONLY values
with alphanumeric, hyphen and underscore characters. You can provide
your own pattern by setting a ``arg_pattern`` param.
:rtype: [(url, RequestHandler), ... ]
:type module_name: str
:param module_name: Name of the module to get routes for
:type custom_routes: [(str, RequestHandler), ... ]
:param custom_routes: List of routes that have custom URLs and therefore
should be automagically generated
:type exclusions: [str, str, ...]
:param exclusions: List of RequestHandler names that routes should not be
generated for
:type arg_pattern: str
:param arg_pattern: Default pattern for extra arguments of any method
"""
def has_method(module, cls_name, method_name):
return all([
method_name in vars(getattr(module, cls_name)),
is_method(reduce(getattr, [module, cls_name, method_name]))
])
def yield_args(module, cls_name, method_name):
"""Get signature of ``module.cls_name.method_name``
Confession: This function doesn't actually ``yield`` the arguments,
just returns a list. Trust me, it's better that way.
:returns: List of arg names from method_name except ``self``
:rtype: list
"""
wrapped_method = reduce(getattr, [module, cls_name, method_name])
method = extract_method(wrapped_method)
# If using tornado_json.gen.coroutine, original args are annotated...
argspec_args = getattr(method, "__argspec_args",
# otherwise just grab them from the method
inspect.getargspec(method).args)
return [a for a in argspec_args if a not in ["self"]]
def generate_auto_route(module, module_name, cls_name, method_name, url_name):
"""Generate URL for auto_route
:rtype: str
:returns: Constructed URL based on given arguments
"""
def get_handler_name():
"""Get handler identifier for URL
For the special case where ``url_name`` is
``__self__``, the handler is named a lowercase
value of its own name with 'handler' removed
from the ending if give; otherwise, we
simply use the provided ``url_name``
"""
if url_name == "__self__":
if cls_name.lower().endswith('handler'):
return cls_name.lower().replace('handler', '', 1)
return cls_name.lower()
else:
return url_name
def get_arg_route():
"""Get remainder of URL determined by method argspec
:returns: Remainder of URL which matches `\w+` regex
with groups named by the method's argument spec.
If there are no arguments given, returns ``""``.
:rtype: str
"""
if yield_args(module, cls_name, method_name):
return "/{}/?$".format("/".join(
[arg_pattern.format(argname) for argname
in yield_args(module, cls_name, method_name)]
))
return r"/?"
return "/{}/{}{}".format(
"/".join(module_name.split(".")[1:]),
get_handler_name(),
get_arg_route()
)
if not custom_routes:
custom_routes = []
if not exclusions:
exclusions = []
# Import module so we can get its request handlers
module = importlib.import_module(module_name)
# Generate list of RequestHandler names in custom_routes
custom_routes_s = [c.__name__ for r, c in custom_routes]
rhs = {cls_name: cls for (cls_name, cls) in
inspect.getmembers(module, inspect.isclass)}
# You better believe this is a list comprehension
auto_routes = list(chain(*[
list(set(chain(*[
# Generate a route for each "name" specified in the
# __url_names__ attribute of the handler
[
# URL, requesthandler tuple
(
generate_auto_route(
module, module_name, cls_name, method_name, url_name
),
getattr(module, cls_name)
) for url_name in getattr(module, cls_name).__url_names__
# Add routes for each custom URL specified in the
# __urls__ attribute of the handler
] + [
(
url,
getattr(module, cls_name)
) for url in getattr(module, cls_name).__urls__
]
# We create a route for each HTTP method in the handler
# so that we catch all possible routes if different
# HTTP methods have different argspecs and are expecting
# to catch different routes. Any duplicate routes
# are removed from the set() comparison.
for method_name in HTTP_METHODS if has_method(
module, cls_name, method_name)
])))
# foreach classname, pyclbr.Class in rhs
for cls_name, cls in rhs.items()
# Only add the pair to auto_routes if:
# * the superclass is in the list of supers we want
# * the requesthandler isn't already paired in custom_routes
# * the requesthandler isn't manually excluded
if is_handler_subclass(cls)
and cls_name not in (custom_routes_s + exclusions)
]))
routes = auto_routes + custom_routes
return routes
|
hfaran/Tornado-JSON | tornado_json/routes.py | get_module_routes | python | def get_module_routes(module_name, custom_routes=None, exclusions=None,
arg_pattern=r'(?P<{}>[a-zA-Z0-9_\-]+)'):
def has_method(module, cls_name, method_name):
return all([
method_name in vars(getattr(module, cls_name)),
is_method(reduce(getattr, [module, cls_name, method_name]))
])
def yield_args(module, cls_name, method_name):
"""Get signature of ``module.cls_name.method_name``
Confession: This function doesn't actually ``yield`` the arguments,
just returns a list. Trust me, it's better that way.
:returns: List of arg names from method_name except ``self``
:rtype: list
"""
wrapped_method = reduce(getattr, [module, cls_name, method_name])
method = extract_method(wrapped_method)
# If using tornado_json.gen.coroutine, original args are annotated...
argspec_args = getattr(method, "__argspec_args",
# otherwise just grab them from the method
inspect.getargspec(method).args)
return [a for a in argspec_args if a not in ["self"]]
def generate_auto_route(module, module_name, cls_name, method_name, url_name):
"""Generate URL for auto_route
:rtype: str
:returns: Constructed URL based on given arguments
"""
def get_handler_name():
"""Get handler identifier for URL
For the special case where ``url_name`` is
``__self__``, the handler is named a lowercase
value of its own name with 'handler' removed
from the ending if give; otherwise, we
simply use the provided ``url_name``
"""
if url_name == "__self__":
if cls_name.lower().endswith('handler'):
return cls_name.lower().replace('handler', '', 1)
return cls_name.lower()
else:
return url_name
def get_arg_route():
"""Get remainder of URL determined by method argspec
:returns: Remainder of URL which matches `\w+` regex
with groups named by the method's argument spec.
If there are no arguments given, returns ``""``.
:rtype: str
"""
if yield_args(module, cls_name, method_name):
return "/{}/?$".format("/".join(
[arg_pattern.format(argname) for argname
in yield_args(module, cls_name, method_name)]
))
return r"/?"
return "/{}/{}{}".format(
"/".join(module_name.split(".")[1:]),
get_handler_name(),
get_arg_route()
)
if not custom_routes:
custom_routes = []
if not exclusions:
exclusions = []
# Import module so we can get its request handlers
module = importlib.import_module(module_name)
# Generate list of RequestHandler names in custom_routes
custom_routes_s = [c.__name__ for r, c in custom_routes]
rhs = {cls_name: cls for (cls_name, cls) in
inspect.getmembers(module, inspect.isclass)}
# You better believe this is a list comprehension
auto_routes = list(chain(*[
list(set(chain(*[
# Generate a route for each "name" specified in the
# __url_names__ attribute of the handler
[
# URL, requesthandler tuple
(
generate_auto_route(
module, module_name, cls_name, method_name, url_name
),
getattr(module, cls_name)
) for url_name in getattr(module, cls_name).__url_names__
# Add routes for each custom URL specified in the
# __urls__ attribute of the handler
] + [
(
url,
getattr(module, cls_name)
) for url in getattr(module, cls_name).__urls__
]
# We create a route for each HTTP method in the handler
# so that we catch all possible routes if different
# HTTP methods have different argspecs and are expecting
# to catch different routes. Any duplicate routes
# are removed from the set() comparison.
for method_name in HTTP_METHODS if has_method(
module, cls_name, method_name)
])))
# foreach classname, pyclbr.Class in rhs
for cls_name, cls in rhs.items()
# Only add the pair to auto_routes if:
# * the superclass is in the list of supers we want
# * the requesthandler isn't already paired in custom_routes
# * the requesthandler isn't manually excluded
if is_handler_subclass(cls)
and cls_name not in (custom_routes_s + exclusions)
]))
routes = auto_routes + custom_routes
return routes | Create and return routes for module_name
Routes are (url, RequestHandler) tuples
:returns: list of routes for ``module_name`` with respect to ``exclusions``
and ``custom_routes``. Returned routes are with URLs formatted such
that they are forward-slash-separated by module/class level
and end with the lowercase name of the RequestHandler (it will also
remove 'handler' from the end of the name of the handler).
For example, a requesthandler with the name
``helloworld.api.HelloWorldHandler`` would be assigned the url
``/api/helloworld``.
Additionally, if a method has extra arguments aside from ``self`` in
its signature, routes with URL patterns will be generated to
match ``r"(?P<{}>[a-zA-Z0-9_\-]+)".format(argname)`` for each
argument. The aforementioned regex will match ONLY values
with alphanumeric, hyphen and underscore characters. You can provide
your own pattern by setting a ``arg_pattern`` param.
:rtype: [(url, RequestHandler), ... ]
:type module_name: str
:param module_name: Name of the module to get routes for
:type custom_routes: [(str, RequestHandler), ... ]
:param custom_routes: List of routes that have custom URLs and therefore
should be automagically generated
:type exclusions: [str, str, ...]
:param exclusions: List of RequestHandler names that routes should not be
generated for
:type arg_pattern: str
:param arg_pattern: Default pattern for extra arguments of any method | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/routes.py#L43-L197 | null | import pkgutil
import importlib
import inspect
from itertools import chain
from functools import reduce
from tornado_json.constants import HTTP_METHODS
from tornado_json.utils import extract_method, is_method, is_handler_subclass
def get_routes(package):
"""
This will walk ``package`` and generates routes from any and all
``APIHandler`` and ``ViewHandler`` subclasses it finds. If you need to
customize or remove any routes, you can do so to the list of
returned routes that this generates.
:type package: package
:param package: The package containing RequestHandlers to generate
routes from
:returns: List of routes for all submodules of ``package``
:rtype: [(url, RequestHandler), ... ]
"""
return list(chain(*[get_module_routes(modname) for modname in
gen_submodule_names(package)]))
def gen_submodule_names(package):
"""Walk package and yield names of all submodules
:type package: package
:param package: The package to get submodule names of
:returns: Iterator that yields names of all submodules of ``package``
:rtype: Iterator that yields ``str``
"""
for importer, modname, ispkg in pkgutil.walk_packages(
path=package.__path__,
prefix=package.__name__ + '.',
onerror=lambda x: None):
yield modname
|
hfaran/Tornado-JSON | demos/helloworld/helloworld/api.py | AsyncHelloWorld.get | python | def get(self, name):
# Asynchronously yield a result from a method
res = yield gen.Task(self.hello, name)
# When using the `schema.validate` decorator asynchronously,
# we can return the output desired by raising
# `tornado.gen.Return(value)` which returns a
# Future that the decorator will yield.
# In Python 3.3, using `raise Return(value)` is no longer
# necessary and can be replaced with simply `return value`.
# For details, see:
# http://www.tornadoweb.org/en/branch3.2/gen.html#tornado.gen.Return
# return res # Python 3.3
raise gen.Return(res) | Shouts hello to the world (asynchronously)! | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/demos/helloworld/helloworld/api.py#L93-L108 | null | class AsyncHelloWorld(APIHandler):
def hello(self, name, callback=None):
callback("Hello (asynchronous) world! My name is {}.".format(name))
@schema.validate(
output_schema={"type": "string"},
output_example="Hello (asynchronous) world! My name is Fred."
)
# ``tornado_json.gen.coroutine`` must be used for coroutines
# ``tornado.gen.coroutine`` CANNOT be used directly
@coroutine
# Python 2.7
|
hfaran/Tornado-JSON | tornado_json/gen.py | coroutine | python | def coroutine(func, replace_callback=True):
# gen.coroutine in tornado 3.x.x and 5.x.x have a different signature than 4.x.x
if TORNADO_MAJOR != 4:
wrapper = gen.coroutine(func)
else:
wrapper = gen.coroutine(func, replace_callback)
wrapper.__argspec_args = inspect.getargspec(func).args
return wrapper | Tornado-JSON compatible wrapper for ``tornado.gen.coroutine``
Annotates original argspec.args of ``func`` as attribute ``__argspec_args`` | train | https://github.com/hfaran/Tornado-JSON/blob/8d8b35ff77f13cb3ab1a606bd2083b26cc69c54f/tornado_json/gen.py#L8-L19 | null | import inspect
from tornado import gen
from tornado_json.constants import TORNADO_MAJOR
|
openwisp/netdiff | netdiff/parsers/netjson.py | NetJsonParser.parse | python | def parse(self, data):
graph = self._init_graph()
# ensure is NetJSON NetworkGraph object
if 'type' not in data or data['type'] != 'NetworkGraph':
raise ParserError('Parse error, not a NetworkGraph object')
# ensure required keys are present
required_keys = ['protocol', 'version', 'metric', 'nodes', 'links']
for key in required_keys:
if key not in data:
raise ParserError('Parse error, "{0}" key not found'.format(key))
# store metadata
self.protocol = data['protocol']
self.version = data['version']
self.revision = data.get('revision') # optional
self.metric = data['metric']
# create graph
for node in data['nodes']:
graph.add_node(node['id'],
label=node['label'] if 'label' in node else None,
local_addresses=node.get('local_addresses', []),
**node.get('properties', {}))
for link in data['links']:
try:
source = link["source"]
dest = link["target"]
cost = link["cost"]
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
properties = link.get('properties', {})
graph.add_edge(source, dest, weight=cost, **properties)
return graph | Converts a NetJSON 'NetworkGraph' object
to a NetworkX Graph object,which is then returned.
Additionally checks for protocol version, revision and metric. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/netjson.py#L8-L45 | [
"def _init_graph(self):\n return networkx.DiGraph() if self.directed else networkx.Graph()\n"
] | class NetJsonParser(BaseParser):
""" NetJSON (0.1) parser """
|
openwisp/netdiff | netdiff/parsers/openvpn.py | OpenvpnParser.parse | python | def parse(self, data):
# initialize graph and list of aggregated nodes
graph = self._init_graph()
server = self._server_common_name
# add server (central node) to graph
graph.add_node(server)
# data may be empty
if data is None:
clients = []
links = []
else:
clients = data.client_list.values()
links = data.routing_table.values()
# add clients in graph as nodes
for client in clients:
if client.common_name == 'UNDEF':
continue
client_properties = {
'label': client.common_name,
'real_address': str(client.real_address.host),
'port': int(client.real_address.port),
'connected_since': client.connected_since.strftime('%Y-%m-%dT%H:%M:%SZ'),
'bytes_received': int(client.bytes_received),
'bytes_sent': int(client.bytes_sent)
}
local_addresses = [
str(route.virtual_address)
for route in data.routing_table.values()
if route.real_address == client.real_address
]
if local_addresses:
client_properties['local_addresses'] = local_addresses
graph.add_node(str(client.real_address.host), **client_properties)
# add links in routing table to graph
for link in links:
if link.common_name == 'UNDEF':
continue
graph.add_edge(server, str(link.real_address.host), weight=1)
return graph | Converts a OpenVPN JSON to a NetworkX Graph object
which is then returned. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/openvpn.py#L25-L67 | [
"def _init_graph(self):\n return networkx.DiGraph() if self.directed else networkx.Graph()\n"
] | class OpenvpnParser(BaseParser):
""" OpenVPN status log parser """
protocol = 'OpenVPN Status Log'
version = '1'
metric = 'static'
# for internal use only
_server_common_name = 'openvpn-server'
def to_python(self, data):
if not data:
return None
try:
return parse_status(data)
except (AttributeError, ParsingError) as e:
msg = 'OpenVPN parsing error: {0}'.format(str(e))
raise ConversionException(msg, data=data)
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser.to_python | python | def to_python(self, data):
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data) | Adds support for txtinfo format | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L14-L21 | [
"def to_python(self, data):\n \"\"\"\n Parses the input data and converts it into a Python data structure\n Input data might be:\n * a path which points to a JSON file\n * a URL which points to a JSON file\n (supported schemes: http, https, telnet)\n * a JSON formatted string\n * a dict representing a JSON structure\n \"\"\"\n if isinstance(data, dict):\n return data\n elif isinstance(data, six.string_types):\n # assuming is JSON\n try:\n return json.loads(data)\n except ValueError:\n pass\n raise ConversionException('Could not recognize format', data=data)\n",
"def _txtinfo_to_python(self, data):\n \"\"\"\n Converts txtinfo format to python\n \"\"\"\n self._format = 'txtinfo'\n # find interesting section\n lines = data.split('\\n')\n try:\n start = lines.index('Table: Topology') + 2\n except ValueError:\n raise ParserError('Unrecognized format')\n topology_lines = [line for line in lines[start:] if line]\n # convert to python list\n parsed_lines = []\n for line in topology_lines:\n values = line.split(' ')\n parsed_lines.append({\n 'source': values[0],\n 'target': values[1],\n 'cost': float(values[4])\n })\n return parsed_lines\n"
] | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser._txtinfo_to_python | python | def _txtinfo_to_python(self, data):
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines | Converts txtinfo format to python | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L23-L44 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser._get_primary_address | python | def _get_primary_address(self, mac_address, node_list):
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address | Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L46-L55 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser._get_aggregated_node_list | python | def _get_aggregated_node_list(self, data):
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list | Returns list of main and secondary mac addresses. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L57-L67 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser.parse | python | def parse(self, data):
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data) | Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L69-L77 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser._parse_alfred_vis | python | def _parse_alfred_vis(self, data):
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph | Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L79-L106 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_txtinfo(self, data):
"""
Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned.
"""
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph
|
openwisp/netdiff | netdiff/parsers/batman.py | BatmanParser._parse_txtinfo | python | def _parse_txtinfo(self, data):
graph = self._init_graph()
for link in data:
graph.add_edge(link['source'],
link['target'],
weight=link['cost'])
return graph | Converts the python list returned by self._txtinfo_to_python()
to a NetworkX Graph object, which is then returned. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/batman.py#L108-L118 | null | class BatmanParser(BaseParser):
""" batman-adv parser """
protocol = 'batman-adv'
version = '2015.0'
metric = 'TQ'
# the default expected format
_format = 'alfred_vis'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(BatmanParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_python(e.data)
def _txtinfo_to_python(self, data):
"""
Converts txtinfo format to python
"""
self._format = 'txtinfo'
# find interesting section
lines = data.split('\n')
try:
start = lines.index('Table: Topology') + 2
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = [line for line in lines[start:] if line]
# convert to python list
parsed_lines = []
for line in topology_lines:
values = line.split(' ')
parsed_lines.append({
'source': values[0],
'target': values[1],
'cost': float(values[4])
})
return parsed_lines
def _get_primary_address(self, mac_address, node_list):
"""
Uses the _get_aggregated_node_list structure to find
the primary mac address associated to a secondary one,
if none is found returns itself.
"""
for local_addresses in node_list:
if mac_address in local_addresses:
return local_addresses[0]
return mac_address
def _get_aggregated_node_list(self, data):
"""
Returns list of main and secondary mac addresses.
"""
node_list = []
for node in data:
local_addresses = [node['primary']]
if 'secondary' in node:
local_addresses += node['secondary']
node_list.append(local_addresses)
return node_list
def parse(self, data):
"""
Calls the right method depending on the format,
which can be one of the wollowing:
* alfred_vis
* txtinfo
"""
method = getattr(self, '_parse_{0}'.format(self._format))
return method(data)
def _parse_alfred_vis(self, data):
"""
Converts a alfred-vis JSON object
to a NetworkX Graph object which is then returned.
Additionally checks for "source_vesion" to determine the batman-adv version.
"""
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if 'source_version' in data:
self.version = data['source_version']
if 'vis' not in data:
raise ParserError('Parse error, "vis" key not found')
node_list = self._get_aggregated_node_list(data['vis'])
# loop over topology section and create networkx graph
for node in data["vis"]:
for neigh in node["neighbors"]:
graph.add_node(node['primary'], **{
'local_addresses': node.get('secondary', []),
'clients': node.get('clients', [])
})
primary_neigh = self._get_primary_address(neigh['neighbor'],
node_list)
# networkx automatically ignores duplicated edges
graph.add_edge(node['primary'],
primary_neigh,
weight=float(neigh['metric']))
return graph
|
openwisp/netdiff | netdiff/parsers/base.py | BaseParser.to_python | python | def to_python(self, data):
if isinstance(data, dict):
return data
elif isinstance(data, six.string_types):
# assuming is JSON
try:
return json.loads(data)
except ValueError:
pass
raise ConversionException('Could not recognize format', data=data) | Parses the input data and converts it into a Python data structure
Input data might be:
* a path which points to a JSON file
* a URL which points to a JSON file
(supported schemes: http, https, telnet)
* a JSON formatted string
* a dict representing a JSON structure | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/base.py#L76-L94 | null | class BaseParser(object):
"""
Base Class for Parsers
To create a parser, extend this class
and implement a parse method
"""
protocol = None
version = None
revision = None
metric = None
def __init__(self, data=None, url=None, file=None,
version=None, revision=None, metric=None,
timeout=None, verify=True, directed=False): # noqa
"""
Initializes a new Parser
:param data: ``str`` or ``dict`` containing topology data
:param url: HTTP URL to retrieve topology data
:param file: path to file containing topology data
:param version: routing protocol version
:param revision: routing protocol revision
:param metric: routing protocol metric
:param timeout: timeout in seconds for HTTP or telnet requests
:param verify: boolean (valid for HTTPS requests only)
:param directed: whether the resulting graph should be directed
(undirected by default for backwards compatibility)
"""
if version:
self.version = version
if revision:
self.revision = revision
if metric:
self.metric = metric
self.timeout = timeout
self.verify = verify
self.directed = directed
if data is None and url is not None:
data = self._get_url(url)
elif data is None and file is not None:
data = self._get_file(file)
elif data is None and url is None and file is None:
raise ValueError('no topology data supplied, on of the following arguments'
'must be supplied: data, url or file')
self.original_data = self.to_python(data)
# avoid throwing NotImplementedError in tests
if self.__class__ is not BaseParser:
self.graph = self.parse(self.original_data)
def _get_url(self, url):
url = urlparse.urlparse(url)
if url.scheme in ['http', 'https']:
return self._get_http(url)
if url.scheme == 'telnet':
return self._get_telnet(url)
def __sub__(self, other):
return diff(other, self)
def _get_file(self, path):
try:
return open(path).read()
except Exception as e:
raise TopologyRetrievalError(e)
def _get_http(self, url):
try:
response = requests.get(url.geturl(),
verify=self.verify,
timeout=self.timeout)
except Exception as e:
raise TopologyRetrievalError(e)
if response.status_code != 200:
msg = 'Expecting HTTP 200 ok, got {0}'.format(response.status_code)
raise TopologyRetrievalError(msg)
return response.content.decode()
def _get_telnet(self, url):
try:
tn = telnetlib.Telnet(url.hostname, url.port, timeout=self.timeout)
except Exception as e:
raise TopologyRetrievalError(e)
tn.write(("\r\n").encode('ascii'))
data = tn.read_all().decode('ascii')
tn.close()
return data
def _init_graph(self):
return networkx.DiGraph() if self.directed else networkx.Graph()
def parse(self, data):
"""
Converts the original python data structure into a NetworkX Graph object
Must be implemented by subclasses.
Must return an instance of <networkx.Graph>
"""
raise NotImplementedError()
def json(self, dict=False, **kwargs):
"""
Outputs NetJSON format
"""
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs)
|
openwisp/netdiff | netdiff/parsers/base.py | BaseParser.json | python | def json(self, dict=False, **kwargs):
try:
graph = self.graph
except AttributeError:
raise NotImplementedError()
return _netjson_networkgraph(self.protocol,
self.version,
self.revision,
self.metric,
graph.nodes(data=True),
graph.edges(data=True),
dict,
**kwargs) | Outputs NetJSON format | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/base.py#L135-L150 | [
"def _netjson_networkgraph(protocol, version, revision, metric,\n nodes, links,\n dict=False, **kwargs):\n # netjson format validity check\n if protocol is None:\n raise NetJsonError('protocol cannot be None')\n if version is None and protocol != 'static':\n raise NetJsonError('version cannot be None except when protocol is \"static\"')\n if metric is None and protocol != 'static':\n raise NetJsonError('metric cannot be None except when protocol is \"static\"')\n # prepare nodes\n node_list = []\n for node in nodes:\n netjson_node = OrderedDict({'id': node[0]})\n # must copy properties dict to avoid modifying data\n properties = node[1].copy()\n local_addresses = properties.pop('local_addresses', None)\n label = properties.pop('label', None)\n # append local_addresses only if not empty\n if local_addresses:\n netjson_node['local_addresses'] = local_addresses\n # append properties only if not empty\n if properties:\n netjson_node['properties'] = properties\n # append label only if not empty\n if label:\n netjson_node['label'] = label\n node_list.append(netjson_node)\n # prepare links\n link_list = []\n for link in links:\n # must copy properties dict to avoid modifying data\n properties = link[2].copy()\n cost = properties.pop('weight')\n netjson_link = OrderedDict((\n ('source', link[0]),\n ('target', link[1]),\n ('cost', cost)\n ))\n # append properties only if not empty\n if properties:\n netjson_link['properties'] = properties\n link_list.append(netjson_link)\n data = OrderedDict((\n ('type', 'NetworkGraph'),\n ('protocol', protocol),\n ('version', version),\n ('revision', revision),\n ('metric', metric),\n ('nodes', node_list),\n ('links', link_list)\n ))\n if dict:\n return data\n return json.dumps(data, **kwargs)\n"
] | class BaseParser(object):
"""
Base Class for Parsers
To create a parser, extend this class
and implement a parse method
"""
protocol = None
version = None
revision = None
metric = None
def __init__(self, data=None, url=None, file=None,
version=None, revision=None, metric=None,
timeout=None, verify=True, directed=False): # noqa
"""
Initializes a new Parser
:param data: ``str`` or ``dict`` containing topology data
:param url: HTTP URL to retrieve topology data
:param file: path to file containing topology data
:param version: routing protocol version
:param revision: routing protocol revision
:param metric: routing protocol metric
:param timeout: timeout in seconds for HTTP or telnet requests
:param verify: boolean (valid for HTTPS requests only)
:param directed: whether the resulting graph should be directed
(undirected by default for backwards compatibility)
"""
if version:
self.version = version
if revision:
self.revision = revision
if metric:
self.metric = metric
self.timeout = timeout
self.verify = verify
self.directed = directed
if data is None and url is not None:
data = self._get_url(url)
elif data is None and file is not None:
data = self._get_file(file)
elif data is None and url is None and file is None:
raise ValueError('no topology data supplied, on of the following arguments'
'must be supplied: data, url or file')
self.original_data = self.to_python(data)
# avoid throwing NotImplementedError in tests
if self.__class__ is not BaseParser:
self.graph = self.parse(self.original_data)
def _get_url(self, url):
url = urlparse.urlparse(url)
if url.scheme in ['http', 'https']:
return self._get_http(url)
if url.scheme == 'telnet':
return self._get_telnet(url)
def __sub__(self, other):
return diff(other, self)
def to_python(self, data):
"""
Parses the input data and converts it into a Python data structure
Input data might be:
* a path which points to a JSON file
* a URL which points to a JSON file
(supported schemes: http, https, telnet)
* a JSON formatted string
* a dict representing a JSON structure
"""
if isinstance(data, dict):
return data
elif isinstance(data, six.string_types):
# assuming is JSON
try:
return json.loads(data)
except ValueError:
pass
raise ConversionException('Could not recognize format', data=data)
def _get_file(self, path):
try:
return open(path).read()
except Exception as e:
raise TopologyRetrievalError(e)
def _get_http(self, url):
try:
response = requests.get(url.geturl(),
verify=self.verify,
timeout=self.timeout)
except Exception as e:
raise TopologyRetrievalError(e)
if response.status_code != 200:
msg = 'Expecting HTTP 200 ok, got {0}'.format(response.status_code)
raise TopologyRetrievalError(msg)
return response.content.decode()
def _get_telnet(self, url):
try:
tn = telnetlib.Telnet(url.hostname, url.port, timeout=self.timeout)
except Exception as e:
raise TopologyRetrievalError(e)
tn.write(("\r\n").encode('ascii'))
data = tn.read_all().decode('ascii')
tn.close()
return data
def _init_graph(self):
return networkx.DiGraph() if self.directed else networkx.Graph()
def parse(self, data):
"""
Converts the original python data structure into a NetworkX Graph object
Must be implemented by subclasses.
Must return an instance of <networkx.Graph>
"""
raise NotImplementedError()
|
openwisp/netdiff | netdiff/utils.py | diff | python | def diff(old, new):
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
)) | Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/utils.py#L7-L48 | [
"def _netjson_networkgraph(protocol, version, revision, metric,\n nodes, links,\n dict=False, **kwargs):\n # netjson format validity check\n if protocol is None:\n raise NetJsonError('protocol cannot be None')\n if version is None and protocol != 'static':\n raise NetJsonError('version cannot be None except when protocol is \"static\"')\n if metric is None and protocol != 'static':\n raise NetJsonError('metric cannot be None except when protocol is \"static\"')\n # prepare nodes\n node_list = []\n for node in nodes:\n netjson_node = OrderedDict({'id': node[0]})\n # must copy properties dict to avoid modifying data\n properties = node[1].copy()\n local_addresses = properties.pop('local_addresses', None)\n label = properties.pop('label', None)\n # append local_addresses only if not empty\n if local_addresses:\n netjson_node['local_addresses'] = local_addresses\n # append properties only if not empty\n if properties:\n netjson_node['properties'] = properties\n # append label only if not empty\n if label:\n netjson_node['label'] = label\n node_list.append(netjson_node)\n # prepare links\n link_list = []\n for link in links:\n # must copy properties dict to avoid modifying data\n properties = link[2].copy()\n cost = properties.pop('weight')\n netjson_link = OrderedDict((\n ('source', link[0]),\n ('target', link[1]),\n ('cost', cost)\n ))\n # append properties only if not empty\n if properties:\n netjson_link['properties'] = properties\n link_list.append(netjson_link)\n data = OrderedDict((\n ('type', 'NetworkGraph'),\n ('protocol', protocol),\n ('version', version),\n ('revision', revision),\n ('metric', metric),\n ('nodes', node_list),\n ('links', link_list)\n ))\n if dict:\n return data\n return json.dumps(data, **kwargs)\n",
"def _find_unchanged(old, new):\n \"\"\"\n returns edges that are in both old and new\n \"\"\"\n edges = []\n old_edges = [set(edge) for edge in old.edges()]\n new_edges = [set(edge) for edge in new.edges()]\n for old_edge in old_edges:\n if old_edge in new_edges:\n edges.append(set(old_edge))\n return edges\n",
"def _make_diff(old, new, both):\n \"\"\"\n calculates differences between topologies 'old' and 'new'\n returns a tuple with two network graph objects\n the first graph contains the added nodes, the secnod contains the added links\n \"\"\"\n # make a copy of old topology to avoid tampering with it\n diff_edges = new.copy()\n not_different = [tuple(edge) for edge in both]\n diff_edges.remove_edges_from(not_different)\n # repeat operation with nodes\n diff_nodes = new.copy()\n not_different = []\n for new_node in new.nodes():\n if new_node in old.nodes():\n not_different.append(new_node)\n diff_nodes.remove_nodes_from(not_different)\n # return tuple with modified graphs\n # one for nodes and one for links\n return diff_nodes, diff_edges\n",
"def _find_changed(old, new, both):\n \"\"\"\n returns links that have changed cost\n \"\"\"\n # create two list of sets of old and new edges including cost\n old_edges = []\n for edge in old.edges(data=True):\n # skip links that are not in both\n if set((edge[0], edge[1])) not in both:\n continue\n # wrap cost in tuple so it will be recognizable\n cost = (edge[2]['weight'],)\n old_edges.append(set((edge[0], edge[1], cost)))\n new_edges = []\n for edge in new.edges(data=True):\n # skip links that are not in both\n if set((edge[0], edge[1])) not in both:\n continue\n # wrap cost in tuple so it will be recognizable\n cost = (edge[2]['weight'],)\n new_edges.append(set((edge[0], edge[1], cost)))\n # find out which edge changed\n changed = []\n for new_edge in new_edges:\n if new_edge not in old_edges:\n # new_edge is a set, convert it to list\n new_edge = list(new_edge)\n for item in new_edge:\n if isinstance(item, tuple):\n # unwrap cost from tuple and put it in a dict\n cost = {'weight': item[0]}\n new_edge.remove(item)\n changed.append((new_edge[0], new_edge[1], cost))\n return changed\n"
] | import json
from collections import OrderedDict
from .exceptions import NetJsonError
def _make_diff(old, new, both):
"""
calculates differences between topologies 'old' and 'new'
returns a tuple with two network graph objects
the first graph contains the added nodes, the secnod contains the added links
"""
# make a copy of old topology to avoid tampering with it
diff_edges = new.copy()
not_different = [tuple(edge) for edge in both]
diff_edges.remove_edges_from(not_different)
# repeat operation with nodes
diff_nodes = new.copy()
not_different = []
for new_node in new.nodes():
if new_node in old.nodes():
not_different.append(new_node)
diff_nodes.remove_nodes_from(not_different)
# return tuple with modified graphs
# one for nodes and one for links
return diff_nodes, diff_edges
def _find_unchanged(old, new):
"""
returns edges that are in both old and new
"""
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges
def _find_changed(old, new, both):
"""
returns links that have changed cost
"""
# create two list of sets of old and new edges including cost
old_edges = []
for edge in old.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
old_edges.append(set((edge[0], edge[1], cost)))
new_edges = []
for edge in new.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
new_edges.append(set((edge[0], edge[1], cost)))
# find out which edge changed
changed = []
for new_edge in new_edges:
if new_edge not in old_edges:
# new_edge is a set, convert it to list
new_edge = list(new_edge)
for item in new_edge:
if isinstance(item, tuple):
# unwrap cost from tuple and put it in a dict
cost = {'weight': item[0]}
new_edge.remove(item)
changed.append((new_edge[0], new_edge[1], cost))
return changed
def _netjson_networkgraph(protocol, version, revision, metric,
nodes, links,
dict=False, **kwargs):
# netjson format validity check
if protocol is None:
raise NetJsonError('protocol cannot be None')
if version is None and protocol != 'static':
raise NetJsonError('version cannot be None except when protocol is "static"')
if metric is None and protocol != 'static':
raise NetJsonError('metric cannot be None except when protocol is "static"')
# prepare nodes
node_list = []
for node in nodes:
netjson_node = OrderedDict({'id': node[0]})
# must copy properties dict to avoid modifying data
properties = node[1].copy()
local_addresses = properties.pop('local_addresses', None)
label = properties.pop('label', None)
# append local_addresses only if not empty
if local_addresses:
netjson_node['local_addresses'] = local_addresses
# append properties only if not empty
if properties:
netjson_node['properties'] = properties
# append label only if not empty
if label:
netjson_node['label'] = label
node_list.append(netjson_node)
# prepare links
link_list = []
for link in links:
# must copy properties dict to avoid modifying data
properties = link[2].copy()
cost = properties.pop('weight')
netjson_link = OrderedDict((
('source', link[0]),
('target', link[1]),
('cost', cost)
))
# append properties only if not empty
if properties:
netjson_link['properties'] = properties
link_list.append(netjson_link)
data = OrderedDict((
('type', 'NetworkGraph'),
('protocol', protocol),
('version', version),
('revision', revision),
('metric', metric),
('nodes', node_list),
('links', link_list)
))
if dict:
return data
return json.dumps(data, **kwargs)
|
openwisp/netdiff | netdiff/utils.py | _make_diff | python | def _make_diff(old, new, both):
# make a copy of old topology to avoid tampering with it
diff_edges = new.copy()
not_different = [tuple(edge) for edge in both]
diff_edges.remove_edges_from(not_different)
# repeat operation with nodes
diff_nodes = new.copy()
not_different = []
for new_node in new.nodes():
if new_node in old.nodes():
not_different.append(new_node)
diff_nodes.remove_nodes_from(not_different)
# return tuple with modified graphs
# one for nodes and one for links
return diff_nodes, diff_edges | calculates differences between topologies 'old' and 'new'
returns a tuple with two network graph objects
the first graph contains the added nodes, the secnod contains the added links | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/utils.py#L51-L70 | null | import json
from collections import OrderedDict
from .exceptions import NetJsonError
def diff(old, new):
"""
Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format
"""
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
))
def _find_unchanged(old, new):
"""
returns edges that are in both old and new
"""
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges
def _find_changed(old, new, both):
"""
returns links that have changed cost
"""
# create two list of sets of old and new edges including cost
old_edges = []
for edge in old.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
old_edges.append(set((edge[0], edge[1], cost)))
new_edges = []
for edge in new.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
new_edges.append(set((edge[0], edge[1], cost)))
# find out which edge changed
changed = []
for new_edge in new_edges:
if new_edge not in old_edges:
# new_edge is a set, convert it to list
new_edge = list(new_edge)
for item in new_edge:
if isinstance(item, tuple):
# unwrap cost from tuple and put it in a dict
cost = {'weight': item[0]}
new_edge.remove(item)
changed.append((new_edge[0], new_edge[1], cost))
return changed
def _netjson_networkgraph(protocol, version, revision, metric,
nodes, links,
dict=False, **kwargs):
# netjson format validity check
if protocol is None:
raise NetJsonError('protocol cannot be None')
if version is None and protocol != 'static':
raise NetJsonError('version cannot be None except when protocol is "static"')
if metric is None and protocol != 'static':
raise NetJsonError('metric cannot be None except when protocol is "static"')
# prepare nodes
node_list = []
for node in nodes:
netjson_node = OrderedDict({'id': node[0]})
# must copy properties dict to avoid modifying data
properties = node[1].copy()
local_addresses = properties.pop('local_addresses', None)
label = properties.pop('label', None)
# append local_addresses only if not empty
if local_addresses:
netjson_node['local_addresses'] = local_addresses
# append properties only if not empty
if properties:
netjson_node['properties'] = properties
# append label only if not empty
if label:
netjson_node['label'] = label
node_list.append(netjson_node)
# prepare links
link_list = []
for link in links:
# must copy properties dict to avoid modifying data
properties = link[2].copy()
cost = properties.pop('weight')
netjson_link = OrderedDict((
('source', link[0]),
('target', link[1]),
('cost', cost)
))
# append properties only if not empty
if properties:
netjson_link['properties'] = properties
link_list.append(netjson_link)
data = OrderedDict((
('type', 'NetworkGraph'),
('protocol', protocol),
('version', version),
('revision', revision),
('metric', metric),
('nodes', node_list),
('links', link_list)
))
if dict:
return data
return json.dumps(data, **kwargs)
|
openwisp/netdiff | netdiff/utils.py | _find_unchanged | python | def _find_unchanged(old, new):
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges | returns edges that are in both old and new | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/utils.py#L73-L83 | null | import json
from collections import OrderedDict
from .exceptions import NetJsonError
def diff(old, new):
"""
Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format
"""
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
))
def _make_diff(old, new, both):
"""
calculates differences between topologies 'old' and 'new'
returns a tuple with two network graph objects
the first graph contains the added nodes, the secnod contains the added links
"""
# make a copy of old topology to avoid tampering with it
diff_edges = new.copy()
not_different = [tuple(edge) for edge in both]
diff_edges.remove_edges_from(not_different)
# repeat operation with nodes
diff_nodes = new.copy()
not_different = []
for new_node in new.nodes():
if new_node in old.nodes():
not_different.append(new_node)
diff_nodes.remove_nodes_from(not_different)
# return tuple with modified graphs
# one for nodes and one for links
return diff_nodes, diff_edges
def _find_changed(old, new, both):
"""
returns links that have changed cost
"""
# create two list of sets of old and new edges including cost
old_edges = []
for edge in old.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
old_edges.append(set((edge[0], edge[1], cost)))
new_edges = []
for edge in new.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
new_edges.append(set((edge[0], edge[1], cost)))
# find out which edge changed
changed = []
for new_edge in new_edges:
if new_edge not in old_edges:
# new_edge is a set, convert it to list
new_edge = list(new_edge)
for item in new_edge:
if isinstance(item, tuple):
# unwrap cost from tuple and put it in a dict
cost = {'weight': item[0]}
new_edge.remove(item)
changed.append((new_edge[0], new_edge[1], cost))
return changed
def _netjson_networkgraph(protocol, version, revision, metric,
nodes, links,
dict=False, **kwargs):
# netjson format validity check
if protocol is None:
raise NetJsonError('protocol cannot be None')
if version is None and protocol != 'static':
raise NetJsonError('version cannot be None except when protocol is "static"')
if metric is None and protocol != 'static':
raise NetJsonError('metric cannot be None except when protocol is "static"')
# prepare nodes
node_list = []
for node in nodes:
netjson_node = OrderedDict({'id': node[0]})
# must copy properties dict to avoid modifying data
properties = node[1].copy()
local_addresses = properties.pop('local_addresses', None)
label = properties.pop('label', None)
# append local_addresses only if not empty
if local_addresses:
netjson_node['local_addresses'] = local_addresses
# append properties only if not empty
if properties:
netjson_node['properties'] = properties
# append label only if not empty
if label:
netjson_node['label'] = label
node_list.append(netjson_node)
# prepare links
link_list = []
for link in links:
# must copy properties dict to avoid modifying data
properties = link[2].copy()
cost = properties.pop('weight')
netjson_link = OrderedDict((
('source', link[0]),
('target', link[1]),
('cost', cost)
))
# append properties only if not empty
if properties:
netjson_link['properties'] = properties
link_list.append(netjson_link)
data = OrderedDict((
('type', 'NetworkGraph'),
('protocol', protocol),
('version', version),
('revision', revision),
('metric', metric),
('nodes', node_list),
('links', link_list)
))
if dict:
return data
return json.dumps(data, **kwargs)
|
openwisp/netdiff | netdiff/utils.py | _find_changed | python | def _find_changed(old, new, both):
# create two list of sets of old and new edges including cost
old_edges = []
for edge in old.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
old_edges.append(set((edge[0], edge[1], cost)))
new_edges = []
for edge in new.edges(data=True):
# skip links that are not in both
if set((edge[0], edge[1])) not in both:
continue
# wrap cost in tuple so it will be recognizable
cost = (edge[2]['weight'],)
new_edges.append(set((edge[0], edge[1], cost)))
# find out which edge changed
changed = []
for new_edge in new_edges:
if new_edge not in old_edges:
# new_edge is a set, convert it to list
new_edge = list(new_edge)
for item in new_edge:
if isinstance(item, tuple):
# unwrap cost from tuple and put it in a dict
cost = {'weight': item[0]}
new_edge.remove(item)
changed.append((new_edge[0], new_edge[1], cost))
return changed | returns links that have changed cost | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/utils.py#L86-L119 | null | import json
from collections import OrderedDict
from .exceptions import NetJsonError
def diff(old, new):
"""
Returns differences of two network topologies old and new
in NetJSON NetworkGraph compatible format
"""
protocol = new.protocol
version = new.version
revision = new.revision
metric = new.metric
# calculate differences
in_both = _find_unchanged(old.graph, new.graph)
added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both)
removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both)
changed_edges = _find_changed(old.graph, new.graph, in_both)
# create netjson objects
# or assign None if no changes
if added_nodes.nodes() or added_edges.edges():
added = _netjson_networkgraph(protocol, version, revision, metric,
added_nodes.nodes(data=True),
added_edges.edges(data=True),
dict=True)
else:
added = None
if removed_nodes.nodes() or removed_edges.edges():
removed = _netjson_networkgraph(protocol, version, revision, metric,
removed_nodes.nodes(data=True),
removed_edges.edges(data=True),
dict=True)
else:
removed = None
if changed_edges:
changed = _netjson_networkgraph(protocol, version, revision, metric,
[],
changed_edges,
dict=True)
else:
changed = None
return OrderedDict((
('added', added),
('removed', removed),
('changed', changed)
))
def _make_diff(old, new, both):
"""
calculates differences between topologies 'old' and 'new'
returns a tuple with two network graph objects
the first graph contains the added nodes, the secnod contains the added links
"""
# make a copy of old topology to avoid tampering with it
diff_edges = new.copy()
not_different = [tuple(edge) for edge in both]
diff_edges.remove_edges_from(not_different)
# repeat operation with nodes
diff_nodes = new.copy()
not_different = []
for new_node in new.nodes():
if new_node in old.nodes():
not_different.append(new_node)
diff_nodes.remove_nodes_from(not_different)
# return tuple with modified graphs
# one for nodes and one for links
return diff_nodes, diff_edges
def _find_unchanged(old, new):
"""
returns edges that are in both old and new
"""
edges = []
old_edges = [set(edge) for edge in old.edges()]
new_edges = [set(edge) for edge in new.edges()]
for old_edge in old_edges:
if old_edge in new_edges:
edges.append(set(old_edge))
return edges
def _netjson_networkgraph(protocol, version, revision, metric,
nodes, links,
dict=False, **kwargs):
# netjson format validity check
if protocol is None:
raise NetJsonError('protocol cannot be None')
if version is None and protocol != 'static':
raise NetJsonError('version cannot be None except when protocol is "static"')
if metric is None and protocol != 'static':
raise NetJsonError('metric cannot be None except when protocol is "static"')
# prepare nodes
node_list = []
for node in nodes:
netjson_node = OrderedDict({'id': node[0]})
# must copy properties dict to avoid modifying data
properties = node[1].copy()
local_addresses = properties.pop('local_addresses', None)
label = properties.pop('label', None)
# append local_addresses only if not empty
if local_addresses:
netjson_node['local_addresses'] = local_addresses
# append properties only if not empty
if properties:
netjson_node['properties'] = properties
# append label only if not empty
if label:
netjson_node['label'] = label
node_list.append(netjson_node)
# prepare links
link_list = []
for link in links:
# must copy properties dict to avoid modifying data
properties = link[2].copy()
cost = properties.pop('weight')
netjson_link = OrderedDict((
('source', link[0]),
('target', link[1]),
('cost', cost)
))
# append properties only if not empty
if properties:
netjson_link['properties'] = properties
link_list.append(netjson_link)
data = OrderedDict((
('type', 'NetworkGraph'),
('protocol', protocol),
('version', version),
('revision', revision),
('metric', metric),
('nodes', node_list),
('links', link_list)
))
if dict:
return data
return json.dumps(data, **kwargs)
|
openwisp/netdiff | netdiff/parsers/bmx6.py | Bmx6Parser.parse | python | def parse(self, data):
# initialize graph and list of aggregated nodes
graph = self._init_graph()
if len(data) != 0:
if "links" not in data[0]:
raise ParserError('Parse error, "links" key not found')
# loop over topology section and create networkx graph
# this data structure does not contain cost information, so we set it as 1
for node in data:
for link in node['links']:
cost = (link['txRate'] + link['rxRate']) / 2.0
graph.add_edge(node['name'],
link['name'],
weight=cost,
tx_rate=link['txRate'],
rx_rate=link['rxRate'])
return graph | Converts a BMX6 b6m JSON to a NetworkX Graph object
which is then returned. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/bmx6.py#L11-L31 | [
"def _init_graph(self):\n return networkx.DiGraph() if self.directed else networkx.Graph()\n"
] | class Bmx6Parser(BaseParser):
""" Bmx6_b6m parser """
protocol = 'BMX6_b6m'
version = '0'
metric = 'none'
|
openwisp/netdiff | netdiff/parsers/cnml.py | CnmlParser.parse | python | def parse(self, data):
graph = self._init_graph()
# loop over links and create networkx graph
# Add only working nodes with working links
for link in data.get_inner_links():
if link.status != libcnml.libcnml.Status.WORKING:
continue
interface_a, interface_b = link.getLinkedInterfaces()
source = interface_a.ipv4
dest = interface_b.ipv4
# add link to Graph
graph.add_edge(source, dest, weight=1)
return graph | Converts a CNML structure to a NetworkX Graph object
which is then returned. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/cnml.py#L34-L50 | [
"def _init_graph(self):\n return networkx.DiGraph() if self.directed else networkx.Graph()\n"
] | class CnmlParser(BaseParser):
""" CNML 0.1 parser """
protocol = 'static'
version = None
metric = None
def to_python(self, data):
if isinstance(data, six.string_types):
up = urlparse.urlparse(data)
# if it looks like a file path
if os.path.isfile(data) or up.scheme in ['http', 'https']:
return libcnml.CNMLParser(data)
else:
raise ParserError('Could not decode CNML data')
elif isinstance(data, libcnml.CNMLParser):
return data
else:
raise ParserError('Could not find valid data to parse')
|
openwisp/netdiff | netdiff/parsers/olsr.py | OlsrParser.to_python | python | def to_python(self, data):
try:
return super(OlsrParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_jsoninfo(e.data) | Adds support for txtinfo format | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/olsr.py#L11-L18 | [
"def to_python(self, data):\n \"\"\"\n Parses the input data and converts it into a Python data structure\n Input data might be:\n * a path which points to a JSON file\n * a URL which points to a JSON file\n (supported schemes: http, https, telnet)\n * a JSON formatted string\n * a dict representing a JSON structure\n \"\"\"\n if isinstance(data, dict):\n return data\n elif isinstance(data, six.string_types):\n # assuming is JSON\n try:\n return json.loads(data)\n except ValueError:\n pass\n raise ConversionException('Could not recognize format', data=data)\n",
"def _txtinfo_to_jsoninfo(self, data):\n \"\"\"\n converts olsr 1 txtinfo format to jsoninfo\n \"\"\"\n # replace INFINITE with inf, which is convertible to float\n data = data.replace('INFINITE', 'inf')\n # find interesting section\n lines = data.split('\\n')\n\n # process links in topology section\n try:\n start = lines.index('Table: Topology') + 2\n end = lines[start:].index('') + start\n except ValueError:\n raise ParserError('Unrecognized format')\n topology_lines = lines[start:end]\n # convert topology section to jsoninfo format\n topology = []\n for line in topology_lines:\n values = line.split('\\t')\n topology.append({\n 'destinationIP': values[0],\n 'lastHopIP': values[1],\n 'linkQuality': float(values[2]),\n 'neighborLinkQuality': float(values[3]),\n 'tcEdgeCost': float(values[4]) * 1024.0\n })\n\n # process alias (MID) section\n try:\n start = lines.index('Table: MID') + 2\n end = lines[start:].index('') + start\n except ValueError:\n raise ParserError('Unrecognized format')\n mid_lines = lines[start:end]\n # convert mid section to jsoninfo format\n mid = []\n for line in mid_lines:\n values = line.split('\\t')\n node = values[0]\n aliases = values[1].split(';')\n mid.append({\n 'ipAddress': node,\n 'aliases': [{'ipAddress': alias} for alias in aliases]\n })\n\n return {\n 'topology': topology,\n 'mid': mid\n }\n"
] | class OlsrParser(BaseParser):
""" OLSR 1 jsoninfo parser """
protocol = 'OLSR'
version = '0.8'
metric = 'ETX'
def parse(self, data):
"""
Converts a dict representing an OLSR 0.6.x topology
to a NetworkX Graph object, which is then returned.
Additionally checks for "config" data in order to determine version and revision.
"""
graph = self._init_graph()
if 'topology' not in data:
raise ParserError('Parse error, "topology" key not found')
elif 'mid' not in data:
raise ParserError('Parse error, "mid" key not found')
# determine version and revision
if 'config' in data:
version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')
self.version = version_info[1]
# try to get only the git hash
if 'hash_' in version_info[-1]:
version_info[-1] = version_info[-1].split('hash_')[-1]
self.revision = version_info[-1]
# process alias list
alias_dict = {}
for node in data['mid']:
local_addresses = [alias['ipAddress'] for alias in node['aliases']]
alias_dict[node['ipAddress']] = local_addresses
# loop over topology section and create networkx graph
for link in data['topology']:
try:
source = link['lastHopIP']
target = link['destinationIP']
cost = link['tcEdgeCost']
properties = {
'link_quality': link['linkQuality'],
'neighbor_link_quality': link['neighborLinkQuality']
}
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
# add nodes with their local_addresses
for node in [source, target]:
if node not in alias_dict:
continue
graph.add_node(node, local_addresses=alias_dict[node])
# skip links with infinite cost
if cost == float('inf'):
continue
# original olsrd cost (jsoninfo multiplies by 1024)
cost = float(cost) / 1024.0
# add link to Graph
graph.add_edge(source, target, weight=cost, **properties)
return graph
def _txtinfo_to_jsoninfo(self, data):
"""
converts olsr 1 txtinfo format to jsoninfo
"""
# replace INFINITE with inf, which is convertible to float
data = data.replace('INFINITE', 'inf')
# find interesting section
lines = data.split('\n')
# process links in topology section
try:
start = lines.index('Table: Topology') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = lines[start:end]
# convert topology section to jsoninfo format
topology = []
for line in topology_lines:
values = line.split('\t')
topology.append({
'destinationIP': values[0],
'lastHopIP': values[1],
'linkQuality': float(values[2]),
'neighborLinkQuality': float(values[3]),
'tcEdgeCost': float(values[4]) * 1024.0
})
# process alias (MID) section
try:
start = lines.index('Table: MID') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
mid_lines = lines[start:end]
# convert mid section to jsoninfo format
mid = []
for line in mid_lines:
values = line.split('\t')
node = values[0]
aliases = values[1].split(';')
mid.append({
'ipAddress': node,
'aliases': [{'ipAddress': alias} for alias in aliases]
})
return {
'topology': topology,
'mid': mid
}
|
openwisp/netdiff | netdiff/parsers/olsr.py | OlsrParser.parse | python | def parse(self, data):
graph = self._init_graph()
if 'topology' not in data:
raise ParserError('Parse error, "topology" key not found')
elif 'mid' not in data:
raise ParserError('Parse error, "mid" key not found')
# determine version and revision
if 'config' in data:
version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')
self.version = version_info[1]
# try to get only the git hash
if 'hash_' in version_info[-1]:
version_info[-1] = version_info[-1].split('hash_')[-1]
self.revision = version_info[-1]
# process alias list
alias_dict = {}
for node in data['mid']:
local_addresses = [alias['ipAddress'] for alias in node['aliases']]
alias_dict[node['ipAddress']] = local_addresses
# loop over topology section and create networkx graph
for link in data['topology']:
try:
source = link['lastHopIP']
target = link['destinationIP']
cost = link['tcEdgeCost']
properties = {
'link_quality': link['linkQuality'],
'neighbor_link_quality': link['neighborLinkQuality']
}
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
# add nodes with their local_addresses
for node in [source, target]:
if node not in alias_dict:
continue
graph.add_node(node, local_addresses=alias_dict[node])
# skip links with infinite cost
if cost == float('inf'):
continue
# original olsrd cost (jsoninfo multiplies by 1024)
cost = float(cost) / 1024.0
# add link to Graph
graph.add_edge(source, target, weight=cost, **properties)
return graph | Converts a dict representing an OLSR 0.6.x topology
to a NetworkX Graph object, which is then returned.
Additionally checks for "config" data in order to determine version and revision. | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/olsr.py#L20-L71 | [
"def _init_graph(self):\n return networkx.DiGraph() if self.directed else networkx.Graph()\n"
] | class OlsrParser(BaseParser):
""" OLSR 1 jsoninfo parser """
protocol = 'OLSR'
version = '0.8'
metric = 'ETX'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(OlsrParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_jsoninfo(e.data)
def _txtinfo_to_jsoninfo(self, data):
"""
converts olsr 1 txtinfo format to jsoninfo
"""
# replace INFINITE with inf, which is convertible to float
data = data.replace('INFINITE', 'inf')
# find interesting section
lines = data.split('\n')
# process links in topology section
try:
start = lines.index('Table: Topology') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = lines[start:end]
# convert topology section to jsoninfo format
topology = []
for line in topology_lines:
values = line.split('\t')
topology.append({
'destinationIP': values[0],
'lastHopIP': values[1],
'linkQuality': float(values[2]),
'neighborLinkQuality': float(values[3]),
'tcEdgeCost': float(values[4]) * 1024.0
})
# process alias (MID) section
try:
start = lines.index('Table: MID') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
mid_lines = lines[start:end]
# convert mid section to jsoninfo format
mid = []
for line in mid_lines:
values = line.split('\t')
node = values[0]
aliases = values[1].split(';')
mid.append({
'ipAddress': node,
'aliases': [{'ipAddress': alias} for alias in aliases]
})
return {
'topology': topology,
'mid': mid
}
|
openwisp/netdiff | netdiff/parsers/olsr.py | OlsrParser._txtinfo_to_jsoninfo | python | def _txtinfo_to_jsoninfo(self, data):
# replace INFINITE with inf, which is convertible to float
data = data.replace('INFINITE', 'inf')
# find interesting section
lines = data.split('\n')
# process links in topology section
try:
start = lines.index('Table: Topology') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
topology_lines = lines[start:end]
# convert topology section to jsoninfo format
topology = []
for line in topology_lines:
values = line.split('\t')
topology.append({
'destinationIP': values[0],
'lastHopIP': values[1],
'linkQuality': float(values[2]),
'neighborLinkQuality': float(values[3]),
'tcEdgeCost': float(values[4]) * 1024.0
})
# process alias (MID) section
try:
start = lines.index('Table: MID') + 2
end = lines[start:].index('') + start
except ValueError:
raise ParserError('Unrecognized format')
mid_lines = lines[start:end]
# convert mid section to jsoninfo format
mid = []
for line in mid_lines:
values = line.split('\t')
node = values[0]
aliases = values[1].split(';')
mid.append({
'ipAddress': node,
'aliases': [{'ipAddress': alias} for alias in aliases]
})
return {
'topology': topology,
'mid': mid
} | converts olsr 1 txtinfo format to jsoninfo | train | https://github.com/openwisp/netdiff/blob/f7fda2ed78ad815b8c56eae27dfd193172fb23f5/netdiff/parsers/olsr.py#L73-L122 | null | class OlsrParser(BaseParser):
""" OLSR 1 jsoninfo parser """
protocol = 'OLSR'
version = '0.8'
metric = 'ETX'
def to_python(self, data):
"""
Adds support for txtinfo format
"""
try:
return super(OlsrParser, self).to_python(data)
except ConversionException as e:
return self._txtinfo_to_jsoninfo(e.data)
def parse(self, data):
"""
Converts a dict representing an OLSR 0.6.x topology
to a NetworkX Graph object, which is then returned.
Additionally checks for "config" data in order to determine version and revision.
"""
graph = self._init_graph()
if 'topology' not in data:
raise ParserError('Parse error, "topology" key not found')
elif 'mid' not in data:
raise ParserError('Parse error, "mid" key not found')
# determine version and revision
if 'config' in data:
version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')
self.version = version_info[1]
# try to get only the git hash
if 'hash_' in version_info[-1]:
version_info[-1] = version_info[-1].split('hash_')[-1]
self.revision = version_info[-1]
# process alias list
alias_dict = {}
for node in data['mid']:
local_addresses = [alias['ipAddress'] for alias in node['aliases']]
alias_dict[node['ipAddress']] = local_addresses
# loop over topology section and create networkx graph
for link in data['topology']:
try:
source = link['lastHopIP']
target = link['destinationIP']
cost = link['tcEdgeCost']
properties = {
'link_quality': link['linkQuality'],
'neighbor_link_quality': link['neighborLinkQuality']
}
except KeyError as e:
raise ParserError('Parse error, "%s" key not found' % e)
# add nodes with their local_addresses
for node in [source, target]:
if node not in alias_dict:
continue
graph.add_node(node, local_addresses=alias_dict[node])
# skip links with infinite cost
if cost == float('inf'):
continue
# original olsrd cost (jsoninfo multiplies by 1024)
cost = float(cost) / 1024.0
# add link to Graph
graph.add_edge(source, target, weight=cost, **properties)
return graph
|
klen/python-scss | scss/function.py | unknown | python | def unknown(*args, **kwargs):
name = kwargs.get('name', '')
return "%s(%s)" % (name, ', '.join(str(a) for a in args)) | Unknow scss function handler.
Simple return 'funcname(args)' | train | https://github.com/klen/python-scss/blob/34fe985e6b43caa9f9b9bcd0dc433be4b2a1fdec/scss/function.py#L46-L51 | null | from __future__ import print_function
import base64
import colorsys
import math
import mimetypes
import os.path
import sys
from .compat import PY3
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from . import OPRT, CONV_TYPE, ELEMENTS_OF_TYPE
from .value import (
NumberValue, StringValue, QuotedStringValue, ColorValue, BooleanValue,
hsl_op, rgba_op)
try:
from PIL import Image
except ImportError:
Image = None
IMAGES = dict()
def warn(warning):
""" Write warning messages in stderr.
"""
print("\nWarning: %s" % str(warning), file=sys.stderr)
def check_pil(func):
""" PIL module checking decorator.
"""
def __wrapper(*args, **kwargs):
root = kwargs.get('root')
if not Image:
if root and root.get_opt('warn'):
warn("Images manipulation require PIL")
return 'none'
return func(*args, **kwargs)
return __wrapper
# RGB functions
# =============
def _rgb(r, g, b, **kwargs):
""" Converts an rgb(red, green, blue) triplet into a color.
"""
return _rgba(r, g, b, 1.0)
def _rgba(r, g, b, a, **kwargs):
""" Converts an rgba(red, green, blue, alpha) quadruplet into a color.
"""
return ColorValue((float(r), float(g), float(b), float(a)))
def _red(color, **kwargs):
""" Gets the red component of a color.
"""
return NumberValue(color.value[0])
def _green(color, **kwargs):
""" Gets the green component of a color.
"""
return NumberValue(color.value[1])
def _blue(color, **kwargs):
""" Gets the blue component of a color.
"""
return NumberValue(color.value[2])
def _mix(color1, color2, weight=0.5, **kwargs):
""" Mixes two colors together.
"""
weight = float(weight)
c1 = color1.value
c2 = color2.value
p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
w = p * 2 - 1
a = c1[3] - c2[3]
w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)])
# HSL functions
# =============
def _hsl(h, s, l, **kwargs):
""" HSL color value.
"""
return _hsla(h, s, l, 1.0)
def _hsla(h, s, l, a, **kwargs):
""" HSL with alpha channel color value.
"""
res = colorsys.hls_to_rgb(float(h), float(l), float(s))
return ColorValue([x * 255.0 for x in res] + [float(a)])
def _hue(color, **kwargs):
""" Get hue value of HSL color.
"""
h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0]
return NumberValue(h * 360.0)
def _lightness(color, **kwargs):
""" Get lightness value of HSL color.
"""
l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
return NumberValue((l * 100, '%'))
def _saturation(color, **kwargs):
""" Get saturation value of HSL color.
"""
s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
return NumberValue((s * 100, '%'))
def _adjust_hue(color, degrees, **kwargs):
return hsl_op(OPRT['+'], color, degrees, 0, 0)
def _lighten(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _darken(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, 0, amount)
def _saturate(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _desaturate(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, amount, 0)
def _grayscale(color, **kwargs):
return hsl_op(OPRT['-'], color, 0, 100, 0)
def _complement(color, **kwargs):
return hsl_op(OPRT['+'], color, 180.0, 0, 0)
# Opacity functions
# =================
def _alpha(color, **kwargs):
c = ColorValue(color).value
return NumberValue(c[3])
def _opacify(color, amount, **kwargs):
return rgba_op(OPRT['+'], color, 0, 0, 0, amount)
def _transparentize(color, amount, **kwargs):
return rgba_op(OPRT['-'], color, 0, 0, 0, amount)
# String functions
# =================
def _unquote(*args, **kwargs):
return StringValue(' '.join(str(s).strip("\"'") for s in args))
def _quote(*args, **kwargs):
return QuotedStringValue(' '.join(str(s) for s in args))
# Number functions
# =================
def _percentage(value, **kwargs):
value = NumberValue(value)
if not value.units == '%':
value.value *= 100
value.units = '%'
return value
def _abs(value, **kwargs):
return abs(float(value))
def _pi(**kwargs):
return NumberValue(math.pi)
def _sin(value, **kwargs):
return math.sin(value)
def _cos(value, **kwargs):
return math.cos(value)
def _tan(value, **kwargs):
return math.tan(value)
def _round(value, **kwargs):
return float(round(value))
def _ceil(value, **kwargs):
return float(math.ceil(value))
def _floor(value, **kwargs):
return float(math.floor(value))
# Introspection functions
# =======================
def _type_of(obj, **kwargs):
if isinstance(obj, BooleanValue):
return StringValue('bool')
if isinstance(obj, NumberValue):
return StringValue('number')
if isinstance(obj, QuotedStringValue):
return StringValue('string')
if isinstance(obj, ColorValue):
return StringValue('color')
if isinstance(obj, dict):
return StringValue('list')
return 'unknown'
def _unit(value, **kwargs):
return NumberValue(value).units
def _unitless(value, **kwargs):
if NumberValue(value).units:
return BooleanValue(False)
return BooleanValue(True)
def _comparable(n1, n2, **kwargs):
n1, n2 = NumberValue(n1), NumberValue(n2)
type1 = CONV_TYPE.get(n1.units)
type2 = CONV_TYPE.get(n2.units)
return BooleanValue(type1 == type2)
# Color functions
# ================
def _adjust_color(
color,
saturation=0.0,
lightness=0.0,
red=0.0,
green=0.0,
blue=0.0,
alpha=0.0,
**kwargs):
return __asc_color(
OPRT['+'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _scale_color(
color,
saturation=1.0,
lightness=1.0,
red=1.0,
green=1.0,
blue=1.0,
alpha=1.0,
**kwargs):
return __asc_color(
OPRT['*'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _change_color(
color,
saturation=None,
lightness=None,
red=None,
green=None,
blue=None,
alpha=None,
**kwargs):
return __asc_color(
None,
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted
def _adjust_lightness(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _adjust_saturation(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _scale_lightness(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, 0, amount)
def _scale_saturation(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, amount, 0)
# Compass helpers
# ================
def _color_stops(*args, **kwargs):
raise NotImplementedError
def _elements_of_type(display, **kwargs):
return StringValue(ELEMENTS_OF_TYPE.get(StringValue(display).value, ''))
def _enumerate(s, b, e, **kwargs):
return ', '.join(
"%s%d" % (StringValue(s).value, x)
for x in range(int(b.value), int(e.value + 1)))
def _font_files(*args, **kwargs):
raise NotImplementedError
def _headings(a=None, b=None, **kwargs):
h = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
if not a or StringValue(a).value == 'all':
a, b = 1, 6
elif b is None:
b, a = a.value + 1, 1
return ', '.join(h[int(float(a) - 1):int(float(b))])
def _nest(*args, **kwargs):
return ', '.join(
' '.join(s.strip() for s in p)
if '&' not in p[1] else p[1].replace('&', p[0].strip())
for p in product(
*(StringValue(sel).value.split(',') for sel in args)
)
)
@check_pil
def _image_width(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[0], 'px'])
@check_pil
def _image_height(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[1], 'px'])
def _image_url(image, **kwargs):
return QuotedStringValue(image).value
def _inline_image(image, mimetype=None, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
if os.path.exists(path):
mimetype = StringValue(mimetype).value or mimetypes.guess_type(path)[0]
f = open(path, 'rb')
if PY3:
data = base64.b64encode(f.read()).decode('utf-8')
else:
data = base64.b64encode(f.read())
url = 'data:' + mimetype + ';base64,' + data
else:
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
url = '%s?_=NA' % QuotedStringValue(image).value
inline = 'url("%s")' % url
return StringValue(inline)
# Misc
# ====
def _if(cond, body, els, **kwargs):
if BooleanValue(cond).value:
return body
return els
def _sprite_position(*args):
pass
def _sprite_file(*args):
pass
def _sprite(*args):
pass
def _sprite_map(*args):
pass
def _sprite_map_name(*args):
pass
def _sprite_url(*args):
pass
def _opposite_position(*args):
pass
def _grad_point(*args):
pass
def _grad_color_stops(*args):
pass
def _nth(*args):
pass
def _join(*args):
pass
def _append(*args):
pass
FUNCTION_LIST = {
# RGB functions
'rgb:3': _rgb,
'rgba:4': _rgba,
'red:1': _red,
'green:1': _green,
'blue:1': _blue,
'mix:2': _mix,
'mix:3': _mix,
# HSL functions
'hsl:3': _hsl,
'hsla:4': _hsla,
'hue:1': _hue,
'saturation:1': _saturation,
'lightness:1': _lightness,
'adjust-hue:2': _adjust_hue,
'spin:2': _adjust_hue,
'lighten:2': _lighten,
'darken:2': _darken,
'saturate:2': _saturate,
'desaturate:2': _desaturate,
'grayscale:1': _grayscale,
'complement:1': _complement,
# Opacity functions
'alpha:1': _alpha,
'opacity:1': _alpha,
'opacify:2': _opacify,
'fadein:2': _opacify,
'fade-in:2': _opacify,
'transparentize:2': _transparentize,
'fadeout:2': _transparentize,
'fade-out:2': _transparentize,
# String functions
'quote:n': _quote,
'unquote:n': _unquote,
# Number functions
'percentage:1': _percentage,
'sin:1': _sin,
'cos:1': _cos,
'tan:1': _tan,
'abs:1': _abs,
'round:1': _round,
'ceil:1': _ceil,
'floor:1': _floor,
'pi:0': _pi,
# Introspection functions
'type-of:1': _type_of,
'unit:1': _unit,
'unitless:1': _unitless,
'comparable:2': _comparable,
# Color functions
'adjust-color:n': _adjust_color,
'scale-color:n': _scale_color,
'change-color:n': _change_color,
'adjust-lightness:2': _adjust_lightness,
'adjust-saturation:2': _adjust_saturation,
'scale-lightness:2': _scale_lightness,
'scale-saturation:2': _scale_saturation,
'invert:1': _invert,
# Compass helpers
'append-selector:2': _nest,
'color-stops:n': _color_stops,
'enumerate:3': _enumerate,
'elements-of-type:1': _elements_of_type,
'font-files:n': _font_files,
'headings:n': _headings,
'nest:n': _nest,
# Images functions
'image-url:1': _image_url,
'image-width:1': _image_width,
'image-height:1': _image_height,
'inline-image:1': _inline_image,
'inline-image:2': _inline_image,
# Not implemented
'sprite-map:1': _sprite_map,
'sprite:2': _sprite,
'sprite:3': _sprite,
'sprite:4': _sprite,
'sprite-map-name:1': _sprite_map_name,
'sprite-file:2': _sprite_file,
'sprite-url:1': _sprite_url,
'sprite-position:2': _sprite_position,
'sprite-position:3': _sprite_position,
'sprite-position:4': _sprite_position,
'opposite-position:n': _opposite_position,
'grad-point:n': _grad_point,
'grad-color-stops:n': _grad_color_stops,
'nth:2': _nth,
'first-value-of:1': _nth,
'join:2': _join,
'join:3': _join,
'append:2': _append,
'append:3': _append,
'if:3': _if,
'escape:1': _unquote,
'e:1': _unquote,
}
def __asc_color(op, color, saturation, lightness, red, green, blue, alpha):
if lightness or saturation:
color = hsl_op(op, color, 0, saturation, lightness)
if red or green or blue or alpha:
color = rgba_op(op, color, red, green, blue, alpha)
return color
def __get_size(path, **kwargs):
root = kwargs.get('root')
if path not in IMAGES:
if not os.path.exists(path):
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
return 0, 0
image = Image.open(path)
IMAGES[path] = image.size
return IMAGES[path]
# pylama:ignore=F0401,D
|
klen/python-scss | scss/function.py | check_pil | python | def check_pil(func):
def __wrapper(*args, **kwargs):
root = kwargs.get('root')
if not Image:
if root and root.get_opt('warn'):
warn("Images manipulation require PIL")
return 'none'
return func(*args, **kwargs)
return __wrapper | PIL module checking decorator. | train | https://github.com/klen/python-scss/blob/34fe985e6b43caa9f9b9bcd0dc433be4b2a1fdec/scss/function.py#L54-L64 | null | from __future__ import print_function
import base64
import colorsys
import math
import mimetypes
import os.path
import sys
from .compat import PY3
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from . import OPRT, CONV_TYPE, ELEMENTS_OF_TYPE
from .value import (
NumberValue, StringValue, QuotedStringValue, ColorValue, BooleanValue,
hsl_op, rgba_op)
try:
from PIL import Image
except ImportError:
Image = None
IMAGES = dict()
def warn(warning):
""" Write warning messages in stderr.
"""
print("\nWarning: %s" % str(warning), file=sys.stderr)
def unknown(*args, **kwargs):
""" Unknow scss function handler.
Simple return 'funcname(args)'
"""
name = kwargs.get('name', '')
return "%s(%s)" % (name, ', '.join(str(a) for a in args))
# RGB functions
# =============
def _rgb(r, g, b, **kwargs):
""" Converts an rgb(red, green, blue) triplet into a color.
"""
return _rgba(r, g, b, 1.0)
def _rgba(r, g, b, a, **kwargs):
""" Converts an rgba(red, green, blue, alpha) quadruplet into a color.
"""
return ColorValue((float(r), float(g), float(b), float(a)))
def _red(color, **kwargs):
""" Gets the red component of a color.
"""
return NumberValue(color.value[0])
def _green(color, **kwargs):
""" Gets the green component of a color.
"""
return NumberValue(color.value[1])
def _blue(color, **kwargs):
""" Gets the blue component of a color.
"""
return NumberValue(color.value[2])
def _mix(color1, color2, weight=0.5, **kwargs):
""" Mixes two colors together.
"""
weight = float(weight)
c1 = color1.value
c2 = color2.value
p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
w = p * 2 - 1
a = c1[3] - c2[3]
w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)])
# HSL functions
# =============
def _hsl(h, s, l, **kwargs):
""" HSL color value.
"""
return _hsla(h, s, l, 1.0)
def _hsla(h, s, l, a, **kwargs):
""" HSL with alpha channel color value.
"""
res = colorsys.hls_to_rgb(float(h), float(l), float(s))
return ColorValue([x * 255.0 for x in res] + [float(a)])
def _hue(color, **kwargs):
""" Get hue value of HSL color.
"""
h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0]
return NumberValue(h * 360.0)
def _lightness(color, **kwargs):
""" Get lightness value of HSL color.
"""
l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
return NumberValue((l * 100, '%'))
def _saturation(color, **kwargs):
""" Get saturation value of HSL color.
"""
s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
return NumberValue((s * 100, '%'))
def _adjust_hue(color, degrees, **kwargs):
return hsl_op(OPRT['+'], color, degrees, 0, 0)
def _lighten(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _darken(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, 0, amount)
def _saturate(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _desaturate(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, amount, 0)
def _grayscale(color, **kwargs):
return hsl_op(OPRT['-'], color, 0, 100, 0)
def _complement(color, **kwargs):
return hsl_op(OPRT['+'], color, 180.0, 0, 0)
# Opacity functions
# =================
def _alpha(color, **kwargs):
c = ColorValue(color).value
return NumberValue(c[3])
def _opacify(color, amount, **kwargs):
return rgba_op(OPRT['+'], color, 0, 0, 0, amount)
def _transparentize(color, amount, **kwargs):
return rgba_op(OPRT['-'], color, 0, 0, 0, amount)
# String functions
# =================
def _unquote(*args, **kwargs):
return StringValue(' '.join(str(s).strip("\"'") for s in args))
def _quote(*args, **kwargs):
return QuotedStringValue(' '.join(str(s) for s in args))
# Number functions
# =================
def _percentage(value, **kwargs):
value = NumberValue(value)
if not value.units == '%':
value.value *= 100
value.units = '%'
return value
def _abs(value, **kwargs):
return abs(float(value))
def _pi(**kwargs):
return NumberValue(math.pi)
def _sin(value, **kwargs):
return math.sin(value)
def _cos(value, **kwargs):
return math.cos(value)
def _tan(value, **kwargs):
return math.tan(value)
def _round(value, **kwargs):
return float(round(value))
def _ceil(value, **kwargs):
return float(math.ceil(value))
def _floor(value, **kwargs):
return float(math.floor(value))
# Introspection functions
# =======================
def _type_of(obj, **kwargs):
if isinstance(obj, BooleanValue):
return StringValue('bool')
if isinstance(obj, NumberValue):
return StringValue('number')
if isinstance(obj, QuotedStringValue):
return StringValue('string')
if isinstance(obj, ColorValue):
return StringValue('color')
if isinstance(obj, dict):
return StringValue('list')
return 'unknown'
def _unit(value, **kwargs):
return NumberValue(value).units
def _unitless(value, **kwargs):
if NumberValue(value).units:
return BooleanValue(False)
return BooleanValue(True)
def _comparable(n1, n2, **kwargs):
n1, n2 = NumberValue(n1), NumberValue(n2)
type1 = CONV_TYPE.get(n1.units)
type2 = CONV_TYPE.get(n2.units)
return BooleanValue(type1 == type2)
# Color functions
# ================
def _adjust_color(
color,
saturation=0.0,
lightness=0.0,
red=0.0,
green=0.0,
blue=0.0,
alpha=0.0,
**kwargs):
return __asc_color(
OPRT['+'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _scale_color(
color,
saturation=1.0,
lightness=1.0,
red=1.0,
green=1.0,
blue=1.0,
alpha=1.0,
**kwargs):
return __asc_color(
OPRT['*'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _change_color(
color,
saturation=None,
lightness=None,
red=None,
green=None,
blue=None,
alpha=None,
**kwargs):
return __asc_color(
None,
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted
def _adjust_lightness(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _adjust_saturation(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _scale_lightness(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, 0, amount)
def _scale_saturation(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, amount, 0)
# Compass helpers
# ================
def _color_stops(*args, **kwargs):
raise NotImplementedError
def _elements_of_type(display, **kwargs):
return StringValue(ELEMENTS_OF_TYPE.get(StringValue(display).value, ''))
def _enumerate(s, b, e, **kwargs):
return ', '.join(
"%s%d" % (StringValue(s).value, x)
for x in range(int(b.value), int(e.value + 1)))
def _font_files(*args, **kwargs):
raise NotImplementedError
def _headings(a=None, b=None, **kwargs):
h = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
if not a or StringValue(a).value == 'all':
a, b = 1, 6
elif b is None:
b, a = a.value + 1, 1
return ', '.join(h[int(float(a) - 1):int(float(b))])
def _nest(*args, **kwargs):
return ', '.join(
' '.join(s.strip() for s in p)
if '&' not in p[1] else p[1].replace('&', p[0].strip())
for p in product(
*(StringValue(sel).value.split(',') for sel in args)
)
)
@check_pil
def _image_width(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[0], 'px'])
@check_pil
def _image_height(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[1], 'px'])
def _image_url(image, **kwargs):
return QuotedStringValue(image).value
def _inline_image(image, mimetype=None, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
if os.path.exists(path):
mimetype = StringValue(mimetype).value or mimetypes.guess_type(path)[0]
f = open(path, 'rb')
if PY3:
data = base64.b64encode(f.read()).decode('utf-8')
else:
data = base64.b64encode(f.read())
url = 'data:' + mimetype + ';base64,' + data
else:
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
url = '%s?_=NA' % QuotedStringValue(image).value
inline = 'url("%s")' % url
return StringValue(inline)
# Misc
# ====
def _if(cond, body, els, **kwargs):
if BooleanValue(cond).value:
return body
return els
def _sprite_position(*args):
pass
def _sprite_file(*args):
pass
def _sprite(*args):
pass
def _sprite_map(*args):
pass
def _sprite_map_name(*args):
pass
def _sprite_url(*args):
pass
def _opposite_position(*args):
pass
def _grad_point(*args):
pass
def _grad_color_stops(*args):
pass
def _nth(*args):
pass
def _join(*args):
pass
def _append(*args):
pass
FUNCTION_LIST = {
# RGB functions
'rgb:3': _rgb,
'rgba:4': _rgba,
'red:1': _red,
'green:1': _green,
'blue:1': _blue,
'mix:2': _mix,
'mix:3': _mix,
# HSL functions
'hsl:3': _hsl,
'hsla:4': _hsla,
'hue:1': _hue,
'saturation:1': _saturation,
'lightness:1': _lightness,
'adjust-hue:2': _adjust_hue,
'spin:2': _adjust_hue,
'lighten:2': _lighten,
'darken:2': _darken,
'saturate:2': _saturate,
'desaturate:2': _desaturate,
'grayscale:1': _grayscale,
'complement:1': _complement,
# Opacity functions
'alpha:1': _alpha,
'opacity:1': _alpha,
'opacify:2': _opacify,
'fadein:2': _opacify,
'fade-in:2': _opacify,
'transparentize:2': _transparentize,
'fadeout:2': _transparentize,
'fade-out:2': _transparentize,
# String functions
'quote:n': _quote,
'unquote:n': _unquote,
# Number functions
'percentage:1': _percentage,
'sin:1': _sin,
'cos:1': _cos,
'tan:1': _tan,
'abs:1': _abs,
'round:1': _round,
'ceil:1': _ceil,
'floor:1': _floor,
'pi:0': _pi,
# Introspection functions
'type-of:1': _type_of,
'unit:1': _unit,
'unitless:1': _unitless,
'comparable:2': _comparable,
# Color functions
'adjust-color:n': _adjust_color,
'scale-color:n': _scale_color,
'change-color:n': _change_color,
'adjust-lightness:2': _adjust_lightness,
'adjust-saturation:2': _adjust_saturation,
'scale-lightness:2': _scale_lightness,
'scale-saturation:2': _scale_saturation,
'invert:1': _invert,
# Compass helpers
'append-selector:2': _nest,
'color-stops:n': _color_stops,
'enumerate:3': _enumerate,
'elements-of-type:1': _elements_of_type,
'font-files:n': _font_files,
'headings:n': _headings,
'nest:n': _nest,
# Images functions
'image-url:1': _image_url,
'image-width:1': _image_width,
'image-height:1': _image_height,
'inline-image:1': _inline_image,
'inline-image:2': _inline_image,
# Not implemented
'sprite-map:1': _sprite_map,
'sprite:2': _sprite,
'sprite:3': _sprite,
'sprite:4': _sprite,
'sprite-map-name:1': _sprite_map_name,
'sprite-file:2': _sprite_file,
'sprite-url:1': _sprite_url,
'sprite-position:2': _sprite_position,
'sprite-position:3': _sprite_position,
'sprite-position:4': _sprite_position,
'opposite-position:n': _opposite_position,
'grad-point:n': _grad_point,
'grad-color-stops:n': _grad_color_stops,
'nth:2': _nth,
'first-value-of:1': _nth,
'join:2': _join,
'join:3': _join,
'append:2': _append,
'append:3': _append,
'if:3': _if,
'escape:1': _unquote,
'e:1': _unquote,
}
def __asc_color(op, color, saturation, lightness, red, green, blue, alpha):
if lightness or saturation:
color = hsl_op(op, color, 0, saturation, lightness)
if red or green or blue or alpha:
color = rgba_op(op, color, red, green, blue, alpha)
return color
def __get_size(path, **kwargs):
root = kwargs.get('root')
if path not in IMAGES:
if not os.path.exists(path):
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
return 0, 0
image = Image.open(path)
IMAGES[path] = image.size
return IMAGES[path]
# pylama:ignore=F0401,D
|
klen/python-scss | scss/function.py | _rgba | python | def _rgba(r, g, b, a, **kwargs):
return ColorValue((float(r), float(g), float(b), float(a))) | Converts an rgba(red, green, blue, alpha) quadruplet into a color. | train | https://github.com/klen/python-scss/blob/34fe985e6b43caa9f9b9bcd0dc433be4b2a1fdec/scss/function.py#L76-L79 | null | from __future__ import print_function
import base64
import colorsys
import math
import mimetypes
import os.path
import sys
from .compat import PY3
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from . import OPRT, CONV_TYPE, ELEMENTS_OF_TYPE
from .value import (
NumberValue, StringValue, QuotedStringValue, ColorValue, BooleanValue,
hsl_op, rgba_op)
try:
from PIL import Image
except ImportError:
Image = None
IMAGES = dict()
def warn(warning):
""" Write warning messages in stderr.
"""
print("\nWarning: %s" % str(warning), file=sys.stderr)
def unknown(*args, **kwargs):
""" Unknow scss function handler.
Simple return 'funcname(args)'
"""
name = kwargs.get('name', '')
return "%s(%s)" % (name, ', '.join(str(a) for a in args))
def check_pil(func):
""" PIL module checking decorator.
"""
def __wrapper(*args, **kwargs):
root = kwargs.get('root')
if not Image:
if root and root.get_opt('warn'):
warn("Images manipulation require PIL")
return 'none'
return func(*args, **kwargs)
return __wrapper
# RGB functions
# =============
def _rgb(r, g, b, **kwargs):
""" Converts an rgb(red, green, blue) triplet into a color.
"""
return _rgba(r, g, b, 1.0)
def _red(color, **kwargs):
""" Gets the red component of a color.
"""
return NumberValue(color.value[0])
def _green(color, **kwargs):
""" Gets the green component of a color.
"""
return NumberValue(color.value[1])
def _blue(color, **kwargs):
""" Gets the blue component of a color.
"""
return NumberValue(color.value[2])
def _mix(color1, color2, weight=0.5, **kwargs):
""" Mixes two colors together.
"""
weight = float(weight)
c1 = color1.value
c2 = color2.value
p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
w = p * 2 - 1
a = c1[3] - c2[3]
w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)])
# HSL functions
# =============
def _hsl(h, s, l, **kwargs):
""" HSL color value.
"""
return _hsla(h, s, l, 1.0)
def _hsla(h, s, l, a, **kwargs):
""" HSL with alpha channel color value.
"""
res = colorsys.hls_to_rgb(float(h), float(l), float(s))
return ColorValue([x * 255.0 for x in res] + [float(a)])
def _hue(color, **kwargs):
""" Get hue value of HSL color.
"""
h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0]
return NumberValue(h * 360.0)
def _lightness(color, **kwargs):
""" Get lightness value of HSL color.
"""
l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
return NumberValue((l * 100, '%'))
def _saturation(color, **kwargs):
""" Get saturation value of HSL color.
"""
s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
return NumberValue((s * 100, '%'))
def _adjust_hue(color, degrees, **kwargs):
return hsl_op(OPRT['+'], color, degrees, 0, 0)
def _lighten(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _darken(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, 0, amount)
def _saturate(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _desaturate(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, amount, 0)
def _grayscale(color, **kwargs):
return hsl_op(OPRT['-'], color, 0, 100, 0)
def _complement(color, **kwargs):
return hsl_op(OPRT['+'], color, 180.0, 0, 0)
# Opacity functions
# =================
def _alpha(color, **kwargs):
c = ColorValue(color).value
return NumberValue(c[3])
def _opacify(color, amount, **kwargs):
return rgba_op(OPRT['+'], color, 0, 0, 0, amount)
def _transparentize(color, amount, **kwargs):
return rgba_op(OPRT['-'], color, 0, 0, 0, amount)
# String functions
# =================
def _unquote(*args, **kwargs):
return StringValue(' '.join(str(s).strip("\"'") for s in args))
def _quote(*args, **kwargs):
return QuotedStringValue(' '.join(str(s) for s in args))
# Number functions
# =================
def _percentage(value, **kwargs):
value = NumberValue(value)
if not value.units == '%':
value.value *= 100
value.units = '%'
return value
def _abs(value, **kwargs):
return abs(float(value))
def _pi(**kwargs):
return NumberValue(math.pi)
def _sin(value, **kwargs):
return math.sin(value)
def _cos(value, **kwargs):
return math.cos(value)
def _tan(value, **kwargs):
return math.tan(value)
def _round(value, **kwargs):
return float(round(value))
def _ceil(value, **kwargs):
return float(math.ceil(value))
def _floor(value, **kwargs):
return float(math.floor(value))
# Introspection functions
# =======================
def _type_of(obj, **kwargs):
if isinstance(obj, BooleanValue):
return StringValue('bool')
if isinstance(obj, NumberValue):
return StringValue('number')
if isinstance(obj, QuotedStringValue):
return StringValue('string')
if isinstance(obj, ColorValue):
return StringValue('color')
if isinstance(obj, dict):
return StringValue('list')
return 'unknown'
def _unit(value, **kwargs):
return NumberValue(value).units
def _unitless(value, **kwargs):
if NumberValue(value).units:
return BooleanValue(False)
return BooleanValue(True)
def _comparable(n1, n2, **kwargs):
n1, n2 = NumberValue(n1), NumberValue(n2)
type1 = CONV_TYPE.get(n1.units)
type2 = CONV_TYPE.get(n2.units)
return BooleanValue(type1 == type2)
# Color functions
# ================
def _adjust_color(
color,
saturation=0.0,
lightness=0.0,
red=0.0,
green=0.0,
blue=0.0,
alpha=0.0,
**kwargs):
return __asc_color(
OPRT['+'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _scale_color(
color,
saturation=1.0,
lightness=1.0,
red=1.0,
green=1.0,
blue=1.0,
alpha=1.0,
**kwargs):
return __asc_color(
OPRT['*'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _change_color(
color,
saturation=None,
lightness=None,
red=None,
green=None,
blue=None,
alpha=None,
**kwargs):
return __asc_color(
None,
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _invert(color, **kwargs):
""" Returns the inverse (negative) of a color.
The red, green, and blue values are inverted, while the opacity is left alone.
"""
col = ColorValue(color)
args = [
255.0 - col.value[0],
255.0 - col.value[1],
255.0 - col.value[2],
col.value[3],
]
inverted = ColorValue(args)
return inverted
def _adjust_lightness(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _adjust_saturation(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _scale_lightness(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, 0, amount)
def _scale_saturation(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, amount, 0)
# Compass helpers
# ================
def _color_stops(*args, **kwargs):
raise NotImplementedError
def _elements_of_type(display, **kwargs):
return StringValue(ELEMENTS_OF_TYPE.get(StringValue(display).value, ''))
def _enumerate(s, b, e, **kwargs):
return ', '.join(
"%s%d" % (StringValue(s).value, x)
for x in range(int(b.value), int(e.value + 1)))
def _font_files(*args, **kwargs):
raise NotImplementedError
def _headings(a=None, b=None, **kwargs):
h = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
if not a or StringValue(a).value == 'all':
a, b = 1, 6
elif b is None:
b, a = a.value + 1, 1
return ', '.join(h[int(float(a) - 1):int(float(b))])
def _nest(*args, **kwargs):
return ', '.join(
' '.join(s.strip() for s in p)
if '&' not in p[1] else p[1].replace('&', p[0].strip())
for p in product(
*(StringValue(sel).value.split(',') for sel in args)
)
)
@check_pil
def _image_width(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[0], 'px'])
@check_pil
def _image_height(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[1], 'px'])
def _image_url(image, **kwargs):
return QuotedStringValue(image).value
def _inline_image(image, mimetype=None, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
if os.path.exists(path):
mimetype = StringValue(mimetype).value or mimetypes.guess_type(path)[0]
f = open(path, 'rb')
if PY3:
data = base64.b64encode(f.read()).decode('utf-8')
else:
data = base64.b64encode(f.read())
url = 'data:' + mimetype + ';base64,' + data
else:
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
url = '%s?_=NA' % QuotedStringValue(image).value
inline = 'url("%s")' % url
return StringValue(inline)
# Misc
# ====
def _if(cond, body, els, **kwargs):
if BooleanValue(cond).value:
return body
return els
def _sprite_position(*args):
pass
def _sprite_file(*args):
pass
def _sprite(*args):
pass
def _sprite_map(*args):
pass
def _sprite_map_name(*args):
pass
def _sprite_url(*args):
pass
def _opposite_position(*args):
pass
def _grad_point(*args):
pass
def _grad_color_stops(*args):
pass
def _nth(*args):
pass
def _join(*args):
pass
def _append(*args):
pass
FUNCTION_LIST = {
# RGB functions
'rgb:3': _rgb,
'rgba:4': _rgba,
'red:1': _red,
'green:1': _green,
'blue:1': _blue,
'mix:2': _mix,
'mix:3': _mix,
# HSL functions
'hsl:3': _hsl,
'hsla:4': _hsla,
'hue:1': _hue,
'saturation:1': _saturation,
'lightness:1': _lightness,
'adjust-hue:2': _adjust_hue,
'spin:2': _adjust_hue,
'lighten:2': _lighten,
'darken:2': _darken,
'saturate:2': _saturate,
'desaturate:2': _desaturate,
'grayscale:1': _grayscale,
'complement:1': _complement,
# Opacity functions
'alpha:1': _alpha,
'opacity:1': _alpha,
'opacify:2': _opacify,
'fadein:2': _opacify,
'fade-in:2': _opacify,
'transparentize:2': _transparentize,
'fadeout:2': _transparentize,
'fade-out:2': _transparentize,
# String functions
'quote:n': _quote,
'unquote:n': _unquote,
# Number functions
'percentage:1': _percentage,
'sin:1': _sin,
'cos:1': _cos,
'tan:1': _tan,
'abs:1': _abs,
'round:1': _round,
'ceil:1': _ceil,
'floor:1': _floor,
'pi:0': _pi,
# Introspection functions
'type-of:1': _type_of,
'unit:1': _unit,
'unitless:1': _unitless,
'comparable:2': _comparable,
# Color functions
'adjust-color:n': _adjust_color,
'scale-color:n': _scale_color,
'change-color:n': _change_color,
'adjust-lightness:2': _adjust_lightness,
'adjust-saturation:2': _adjust_saturation,
'scale-lightness:2': _scale_lightness,
'scale-saturation:2': _scale_saturation,
'invert:1': _invert,
# Compass helpers
'append-selector:2': _nest,
'color-stops:n': _color_stops,
'enumerate:3': _enumerate,
'elements-of-type:1': _elements_of_type,
'font-files:n': _font_files,
'headings:n': _headings,
'nest:n': _nest,
# Images functions
'image-url:1': _image_url,
'image-width:1': _image_width,
'image-height:1': _image_height,
'inline-image:1': _inline_image,
'inline-image:2': _inline_image,
# Not implemented
'sprite-map:1': _sprite_map,
'sprite:2': _sprite,
'sprite:3': _sprite,
'sprite:4': _sprite,
'sprite-map-name:1': _sprite_map_name,
'sprite-file:2': _sprite_file,
'sprite-url:1': _sprite_url,
'sprite-position:2': _sprite_position,
'sprite-position:3': _sprite_position,
'sprite-position:4': _sprite_position,
'opposite-position:n': _opposite_position,
'grad-point:n': _grad_point,
'grad-color-stops:n': _grad_color_stops,
'nth:2': _nth,
'first-value-of:1': _nth,
'join:2': _join,
'join:3': _join,
'append:2': _append,
'append:3': _append,
'if:3': _if,
'escape:1': _unquote,
'e:1': _unquote,
}
def __asc_color(op, color, saturation, lightness, red, green, blue, alpha):
if lightness or saturation:
color = hsl_op(op, color, 0, saturation, lightness)
if red or green or blue or alpha:
color = rgba_op(op, color, red, green, blue, alpha)
return color
def __get_size(path, **kwargs):
root = kwargs.get('root')
if path not in IMAGES:
if not os.path.exists(path):
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
return 0, 0
image = Image.open(path)
IMAGES[path] = image.size
return IMAGES[path]
# pylama:ignore=F0401,D
|
klen/python-scss | scss/function.py | _mix | python | def _mix(color1, color2, weight=0.5, **kwargs):
weight = float(weight)
c1 = color1.value
c2 = color2.value
p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
w = p * 2 - 1
a = c1[3] - c2[3]
w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)]) | Mixes two colors together. | train | https://github.com/klen/python-scss/blob/34fe985e6b43caa9f9b9bcd0dc433be4b2a1fdec/scss/function.py#L100-L114 | null | from __future__ import print_function
import base64
import colorsys
import math
import mimetypes
import os.path
import sys
from .compat import PY3
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from . import OPRT, CONV_TYPE, ELEMENTS_OF_TYPE
from .value import (
NumberValue, StringValue, QuotedStringValue, ColorValue, BooleanValue,
hsl_op, rgba_op)
try:
from PIL import Image
except ImportError:
Image = None
IMAGES = dict()
def warn(warning):
""" Write warning messages in stderr.
"""
print("\nWarning: %s" % str(warning), file=sys.stderr)
def unknown(*args, **kwargs):
""" Unknow scss function handler.
Simple return 'funcname(args)'
"""
name = kwargs.get('name', '')
return "%s(%s)" % (name, ', '.join(str(a) for a in args))
def check_pil(func):
""" PIL module checking decorator.
"""
def __wrapper(*args, **kwargs):
root = kwargs.get('root')
if not Image:
if root and root.get_opt('warn'):
warn("Images manipulation require PIL")
return 'none'
return func(*args, **kwargs)
return __wrapper
# RGB functions
# =============
def _rgb(r, g, b, **kwargs):
""" Converts an rgb(red, green, blue) triplet into a color.
"""
return _rgba(r, g, b, 1.0)
def _rgba(r, g, b, a, **kwargs):
""" Converts an rgba(red, green, blue, alpha) quadruplet into a color.
"""
return ColorValue((float(r), float(g), float(b), float(a)))
def _red(color, **kwargs):
""" Gets the red component of a color.
"""
return NumberValue(color.value[0])
def _green(color, **kwargs):
""" Gets the green component of a color.
"""
return NumberValue(color.value[1])
def _blue(color, **kwargs):
""" Gets the blue component of a color.
"""
return NumberValue(color.value[2])
# HSL functions
# =============
def _hsl(h, s, l, **kwargs):
""" HSL color value.
"""
return _hsla(h, s, l, 1.0)
def _hsla(h, s, l, a, **kwargs):
""" HSL with alpha channel color value.
"""
res = colorsys.hls_to_rgb(float(h), float(l), float(s))
return ColorValue([x * 255.0 for x in res] + [float(a)])
def _hue(color, **kwargs):
""" Get hue value of HSL color.
"""
h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0]
return NumberValue(h * 360.0)
def _lightness(color, **kwargs):
""" Get lightness value of HSL color.
"""
l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
return NumberValue((l * 100, '%'))
def _saturation(color, **kwargs):
""" Get saturation value of HSL color.
"""
s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
return NumberValue((s * 100, '%'))
def _adjust_hue(color, degrees, **kwargs):
return hsl_op(OPRT['+'], color, degrees, 0, 0)
def _lighten(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _darken(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, 0, amount)
def _saturate(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _desaturate(color, amount, **kwargs):
return hsl_op(OPRT['-'], color, 0, amount, 0)
def _grayscale(color, **kwargs):
return hsl_op(OPRT['-'], color, 0, 100, 0)
def _complement(color, **kwargs):
return hsl_op(OPRT['+'], color, 180.0, 0, 0)
# Opacity functions
# =================
def _alpha(color, **kwargs):
c = ColorValue(color).value
return NumberValue(c[3])
def _opacify(color, amount, **kwargs):
return rgba_op(OPRT['+'], color, 0, 0, 0, amount)
def _transparentize(color, amount, **kwargs):
return rgba_op(OPRT['-'], color, 0, 0, 0, amount)
# String functions
# =================
def _unquote(*args, **kwargs):
return StringValue(' '.join(str(s).strip("\"'") for s in args))
def _quote(*args, **kwargs):
return QuotedStringValue(' '.join(str(s) for s in args))
# Number functions
# =================
def _percentage(value, **kwargs):
value = NumberValue(value)
if not value.units == '%':
value.value *= 100
value.units = '%'
return value
def _abs(value, **kwargs):
return abs(float(value))
def _pi(**kwargs):
return NumberValue(math.pi)
def _sin(value, **kwargs):
return math.sin(value)
def _cos(value, **kwargs):
return math.cos(value)
def _tan(value, **kwargs):
return math.tan(value)
def _round(value, **kwargs):
return float(round(value))
def _ceil(value, **kwargs):
return float(math.ceil(value))
def _floor(value, **kwargs):
return float(math.floor(value))
# Introspection functions
# =======================
def _type_of(obj, **kwargs):
if isinstance(obj, BooleanValue):
return StringValue('bool')
if isinstance(obj, NumberValue):
return StringValue('number')
if isinstance(obj, QuotedStringValue):
return StringValue('string')
if isinstance(obj, ColorValue):
return StringValue('color')
if isinstance(obj, dict):
return StringValue('list')
return 'unknown'
def _unit(value, **kwargs):
return NumberValue(value).units
def _unitless(value, **kwargs):
if NumberValue(value).units:
return BooleanValue(False)
return BooleanValue(True)
def _comparable(n1, n2, **kwargs):
n1, n2 = NumberValue(n1), NumberValue(n2)
type1 = CONV_TYPE.get(n1.units)
type2 = CONV_TYPE.get(n2.units)
return BooleanValue(type1 == type2)
# Color functions
# ================
def _adjust_color(
color,
saturation=0.0,
lightness=0.0,
red=0.0,
green=0.0,
blue=0.0,
alpha=0.0,
**kwargs):
return __asc_color(
OPRT['+'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _scale_color(
color,
saturation=1.0,
lightness=1.0,
red=1.0,
green=1.0,
blue=1.0,
alpha=1.0,
**kwargs):
return __asc_color(
OPRT['*'],
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _change_color(
color,
saturation=None,
lightness=None,
red=None,
green=None,
blue=None,
alpha=None,
**kwargs):
return __asc_color(
None,
color,
saturation,
lightness,
red,
green,
blue,
alpha)
def _invert(color, **kwargs):
    """ Returns the inverse (negative) of a color.
    Each RGB channel is subtracted from 255; the alpha channel is kept.
    """
    col = ColorValue(color)
    channels = [255.0 - col.value[i] for i in range(3)]
    channels.append(col.value[3])
    return ColorValue(channels)
def _adjust_lightness(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, 0, amount)
def _adjust_saturation(color, amount, **kwargs):
return hsl_op(OPRT['+'], color, 0, amount, 0)
def _scale_lightness(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, 0, amount)
def _scale_saturation(color, amount, **kwargs):
return hsl_op(OPRT['*'], color, 0, amount, 0)
# Compass helpers
# ================
def _color_stops(*args, **kwargs):
raise NotImplementedError
def _elements_of_type(display, **kwargs):
return StringValue(ELEMENTS_OF_TYPE.get(StringValue(display).value, ''))
def _enumerate(s, b, e, **kwargs):
return ', '.join(
"%s%d" % (StringValue(s).value, x)
for x in range(int(b.value), int(e.value + 1)))
def _font_files(*args, **kwargs):
raise NotImplementedError
def _headings(a=None, b=None, **kwargs):
h = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
if not a or StringValue(a).value == 'all':
a, b = 1, 6
elif b is None:
b, a = a.value + 1, 1
return ', '.join(h[int(float(a) - 1):int(float(b))])
def _nest(*args, **kwargs):
return ', '.join(
' '.join(s.strip() for s in p)
if '&' not in p[1] else p[1].replace('&', p[0].strip())
for p in product(
*(StringValue(sel).value.split(',') for sel in args)
)
)
@check_pil
def _image_width(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[0], 'px'])
@check_pil
def _image_height(image, **kwargs):
root = kwargs.get('root')
path = os.path.abspath(
os.path.join(
root.get_opt('path'),
StringValue(image).value))
size = __get_size(path, root=root)
return NumberValue([size[1], 'px'])
def _image_url(image, **kwargs):
return QuotedStringValue(image).value
def _inline_image(image, mimetype=None, **kwargs):
    """ Embed an image file as a base64 ``data:`` URI.

    Falls back to a plain cache-busted URL (optionally warning) when the
    file does not exist.

    :param image: image path, resolved against the compiler's 'path' option.
    :param mimetype: explicit MIME type; guessed from the filename if falsy.
    :return: StringValue of the form ``url("...")``.
    """
    root = kwargs.get('root')
    path = os.path.abspath(
        os.path.join(
            root.get_opt('path'),
            StringValue(image).value))
    if os.path.exists(path):
        mimetype = StringValue(mimetype).value or mimetypes.guess_type(path)[0]
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(path, 'rb') as f:
            raw = f.read()
        if PY3:
            data = base64.b64encode(raw).decode('utf-8')
        else:
            data = base64.b64encode(raw)
        url = 'data:' + mimetype + ';base64,' + data
    else:
        if root and root.get_opt('warn'):
            warn("Not found image: %s" % path)
        url = '%s?_=NA' % QuotedStringValue(image).value
    inline = 'url("%s")' % url
    return StringValue(inline)
# Misc
# ====
def _if(cond, body, els, **kwargs):
if BooleanValue(cond).value:
return body
return els
def _sprite_position(*args):
pass
def _sprite_file(*args):
pass
def _sprite(*args):
pass
def _sprite_map(*args):
pass
def _sprite_map_name(*args):
pass
def _sprite_url(*args):
pass
def _opposite_position(*args):
pass
def _grad_point(*args):
pass
def _grad_color_stops(*args):
pass
def _nth(*args):
pass
def _join(*args):
pass
def _append(*args):
pass
FUNCTION_LIST = {
# RGB functions
'rgb:3': _rgb,
'rgba:4': _rgba,
'red:1': _red,
'green:1': _green,
'blue:1': _blue,
'mix:2': _mix,
'mix:3': _mix,
# HSL functions
'hsl:3': _hsl,
'hsla:4': _hsla,
'hue:1': _hue,
'saturation:1': _saturation,
'lightness:1': _lightness,
'adjust-hue:2': _adjust_hue,
'spin:2': _adjust_hue,
'lighten:2': _lighten,
'darken:2': _darken,
'saturate:2': _saturate,
'desaturate:2': _desaturate,
'grayscale:1': _grayscale,
'complement:1': _complement,
# Opacity functions
'alpha:1': _alpha,
'opacity:1': _alpha,
'opacify:2': _opacify,
'fadein:2': _opacify,
'fade-in:2': _opacify,
'transparentize:2': _transparentize,
'fadeout:2': _transparentize,
'fade-out:2': _transparentize,
# String functions
'quote:n': _quote,
'unquote:n': _unquote,
# Number functions
'percentage:1': _percentage,
'sin:1': _sin,
'cos:1': _cos,
'tan:1': _tan,
'abs:1': _abs,
'round:1': _round,
'ceil:1': _ceil,
'floor:1': _floor,
'pi:0': _pi,
# Introspection functions
'type-of:1': _type_of,
'unit:1': _unit,
'unitless:1': _unitless,
'comparable:2': _comparable,
# Color functions
'adjust-color:n': _adjust_color,
'scale-color:n': _scale_color,
'change-color:n': _change_color,
'adjust-lightness:2': _adjust_lightness,
'adjust-saturation:2': _adjust_saturation,
'scale-lightness:2': _scale_lightness,
'scale-saturation:2': _scale_saturation,
'invert:1': _invert,
# Compass helpers
'append-selector:2': _nest,
'color-stops:n': _color_stops,
'enumerate:3': _enumerate,
'elements-of-type:1': _elements_of_type,
'font-files:n': _font_files,
'headings:n': _headings,
'nest:n': _nest,
# Images functions
'image-url:1': _image_url,
'image-width:1': _image_width,
'image-height:1': _image_height,
'inline-image:1': _inline_image,
'inline-image:2': _inline_image,
# Not implemented
'sprite-map:1': _sprite_map,
'sprite:2': _sprite,
'sprite:3': _sprite,
'sprite:4': _sprite,
'sprite-map-name:1': _sprite_map_name,
'sprite-file:2': _sprite_file,
'sprite-url:1': _sprite_url,
'sprite-position:2': _sprite_position,
'sprite-position:3': _sprite_position,
'sprite-position:4': _sprite_position,
'opposite-position:n': _opposite_position,
'grad-point:n': _grad_point,
'grad-color-stops:n': _grad_color_stops,
'nth:2': _nth,
'first-value-of:1': _nth,
'join:2': _join,
'join:3': _join,
'append:2': _append,
'append:3': _append,
'if:3': _if,
'escape:1': _unquote,
'e:1': _unquote,
}
def __asc_color(op, color, saturation, lightness, red, green, blue, alpha):
if lightness or saturation:
color = hsl_op(op, color, 0, saturation, lightness)
if red or green or blue or alpha:
color = rgba_op(op, color, red, green, blue, alpha)
return color
def __get_size(path, **kwargs):
root = kwargs.get('root')
if path not in IMAGES:
if not os.path.exists(path):
if root and root.get_opt('warn'):
warn("Not found image: %s" % path)
return 0, 0
image = Image.open(path)
IMAGES[path] = image.size
return IMAGES[path]
# pylama:ignore=F0401,D
|
def _hsla(h, s, l, a, **kwargs):
    """ HSL with alpha channel color value.
    """
    # Reconstructed from a garbled extraction row. colorsys expects
    # (h, l, s) order and 0..1 channels; scale RGB back to 0..255.
    res = colorsys.hls_to_rgb(float(h), float(l), float(s))
    return ColorValue([x * 255.0 for x in res] + [float(a)])
import base64
import colorsys
import math
import mimetypes
import os.path
import sys
from .compat import PY3
try:
from itertools import product
except ImportError:
def product(*args, **kwds):
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x + [y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
from . import OPRT, CONV_TYPE, ELEMENTS_OF_TYPE
from .value import (
NumberValue, StringValue, QuotedStringValue, ColorValue, BooleanValue,
hsl_op, rgba_op)
try:
from PIL import Image
except ImportError:
Image = None
IMAGES = dict()
def warn(warning):
    """Emit *warning* on stderr, prefixed with a blank line and "Warning:"."""
    sys.stderr.write("\nWarning: %s\n" % str(warning))
def unknown(*args, **kwargs):
    """Handler for unrecognized scss functions.

    Renders the call back out verbatim as ``name(arg1, arg2, ...)``.
    """
    func_name = kwargs.get('name', '')
    rendered = ', '.join(str(arg) for arg in args)
    return "%s(%s)" % (func_name, rendered)
def check_pil(func):
    """ PIL module checking decorator.
    """
    # Guards image-manipulation functions: if PIL failed to import at module
    # load time (``Image`` is None), short-circuit to the CSS keyword 'none'
    # — optionally warning via the 'warn' option — instead of calling *func*.
    def __wrapper(*args, **kwargs):
        root = kwargs.get('root')
        if not Image:
            if root and root.get_opt('warn'):
                warn("Images manipulation require PIL")
            return 'none'
        return func(*args, **kwargs)
    return __wrapper
# RGB functions
# =============
def _rgb(r, g, b, **kwargs):
    """ Converts an rgb(red, green, blue) triplet into a color.
    """
    # Delegates to _rgba with a fully opaque alpha channel.
    return _rgba(r, g, b, 1.0)


def _rgba(r, g, b, a, **kwargs):
    """ Converts an rgba(red, green, blue, alpha) quadruplet into a color.
    """
    return ColorValue((float(r), float(g), float(b), float(a)))


def _red(color, **kwargs):
    """ Gets the red component of a color.
    """
    # ColorValue.value is an (r, g, b, a) sequence.
    return NumberValue(color.value[0])


def _green(color, **kwargs):
    """ Gets the green component of a color.
    """
    return NumberValue(color.value[1])


def _blue(color, **kwargs):
    """ Gets the blue component of a color.
    """
    return NumberValue(color.value[2])
def _mix(color1, color2, weight=0.5, **kwargs):
    """ Mixes two colors together.

    *weight* is the fraction of *color1* in the result (clamped to 0..1).
    The RGB channel weights are additionally skewed by the colors' alpha
    difference, matching the Sass reference implementation of mix().
    """
    weight = float(weight)
    c1 = color1.value
    c2 = color2.value
    # Clamp weight to [0, 1].
    p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight
    w = p * 2 - 1
    # Alpha difference between the two colors.
    a = c1[3] - c2[3]
    # Combined RGB weight; the conditional avoids division by zero
    # when w * a == -1.
    w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0
    w2 = 1 - w1
    # RGB channels use w1/w2; alpha is mixed linearly with p.
    q = [w1, w1, w1, p]
    r = [w2, w2, w2, 1 - p]
    return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)])
# HSL functions
# =============
def _hsla(h, s, l, a, **kwargs):
    """ HSL with alpha channel color value.

    Restored: this definition was missing from the file although _hsl and
    FUNCTION_LIST['hsla:4'] both reference it.
    """
    # colorsys expects (h, l, s) order and 0..1 channels; scale RGB to 0..255.
    res = colorsys.hls_to_rgb(float(h), float(l), float(s))
    return ColorValue([x * 255.0 for x in res] + [float(a)])


def _hsl(h, s, l, **kwargs):
    """ HSL color value.
    """
    # Delegates to _hsla with a fully opaque alpha channel.
    return _hsla(h, s, l, 1.0)
def _hue(color, **kwargs):
    """ Get hue value of HSL color.
    """
    # colorsys works on 0..1 channels and returns (h, l, s); hue is scaled
    # back to degrees.
    h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0]
    return NumberValue(h * 360.0)


def _lightness(color, **kwargs):
    """ Get lightness value of HSL color.
    """
    # Index 1 of (h, l, s); returned as a percentage.
    l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1]
    return NumberValue((l * 100, '%'))


def _saturation(color, **kwargs):
    """ Get saturation value of HSL color.
    """
    # Index 2 of (h, l, s); returned as a percentage.
    s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
    return NumberValue((s * 100, '%'))
# Thin wrappers over hsl_op: apply an arithmetic operator to a single HSL
# channel (argument order after color: hue, saturation, lightness).
def _adjust_hue(color, degrees, **kwargs):
    # Rotate hue by *degrees*.
    return hsl_op(OPRT['+'], color, degrees, 0, 0)


def _lighten(color, amount, **kwargs):
    # Increase lightness by *amount*.
    return hsl_op(OPRT['+'], color, 0, 0, amount)


def _darken(color, amount, **kwargs):
    # Decrease lightness by *amount*.
    return hsl_op(OPRT['-'], color, 0, 0, amount)


def _saturate(color, amount, **kwargs):
    # Increase saturation by *amount*.
    return hsl_op(OPRT['+'], color, 0, amount, 0)


def _desaturate(color, amount, **kwargs):
    # Decrease saturation by *amount*.
    return hsl_op(OPRT['-'], color, 0, amount, 0)


def _grayscale(color, **kwargs):
    # Remove all saturation (100 = full percentage range).
    return hsl_op(OPRT['-'], color, 0, 100, 0)


def _complement(color, **kwargs):
    # Opposite point on the color wheel (hue + 180 degrees).
    return hsl_op(OPRT['+'], color, 180.0, 0, 0)
# Opacity functions
# =================
def _alpha(color, **kwargs):
    """ Get the alpha (opacity) channel of a color.
    """
    # Alpha is the fourth component of the rgba value.
    c = ColorValue(color).value
    return NumberValue(c[3])


def _opacify(color, amount, **kwargs):
    # Make more opaque: add *amount* to the alpha channel.
    return rgba_op(OPRT['+'], color, 0, 0, 0, amount)


def _transparentize(color, amount, **kwargs):
    # Make more transparent: subtract *amount* from the alpha channel.
    return rgba_op(OPRT['-'], color, 0, 0, 0, amount)
# String functions
# =================
def _unquote(*args, **kwargs):
    """Strip surrounding quote characters from each argument and join with
    spaces, returning an unquoted string value."""
    stripped = [str(arg).strip("\"'") for arg in args]
    return StringValue(' '.join(stripped))


def _quote(*args, **kwargs):
    """Join the arguments with spaces and return a quoted string value."""
    return QuotedStringValue(' '.join(map(str, args)))
# Number functions
# =================
def _percentage(value, **kwargs):
    """ Convert a number to a percentage; a plain number is scaled by 100.
    """
    value = NumberValue(value)
    if not value.units == '%':
        value.value *= 100
        value.units = '%'
    return value


def _abs(value, **kwargs):
    # Absolute value as a plain float.
    return abs(float(value))


def _pi(**kwargs):
    return NumberValue(math.pi)


# Trigonometric and rounding helpers simply delegate to the math module
# and return plain floats.
def _sin(value, **kwargs):
    return math.sin(value)


def _cos(value, **kwargs):
    return math.cos(value)


def _tan(value, **kwargs):
    return math.tan(value)


def _round(value, **kwargs):
    return float(round(value))


def _ceil(value, **kwargs):
    return float(math.ceil(value))


def _floor(value, **kwargs):
    return float(math.floor(value))
# Introspection functions
# =======================
def _type_of(obj, **kwargs):
    """ Return the scss type name of *obj* ('bool', 'number', 'string',
    'color', 'list') as a string value.
    """
    if isinstance(obj, BooleanValue):
        return StringValue('bool')
    if isinstance(obj, NumberValue):
        return StringValue('number')
    if isinstance(obj, QuotedStringValue):
        return StringValue('string')
    if isinstance(obj, ColorValue):
        return StringValue('color')
    if isinstance(obj, dict):
        return StringValue('list')
    # NOTE(review): falls back to a plain str, unlike the StringValue
    # returns above.
    return 'unknown'


def _unit(value, **kwargs):
    """ Return the unit string of a number (e.g. 'px', '%').
    """
    return NumberValue(value).units


def _unitless(value, **kwargs):
    """ True when the number carries no unit.
    """
    if NumberValue(value).units:
        return BooleanValue(False)
    return BooleanValue(True)


def _comparable(n1, n2, **kwargs):
    """ True when both numbers' units map to the same conversion family
    in CONV_TYPE (i.e. they can be converted into each other).
    """
    n1, n2 = NumberValue(n1), NumberValue(n2)
    type1 = CONV_TYPE.get(n1.units)
    type2 = CONV_TYPE.get(n2.units)
    return BooleanValue(type1 == type2)
# Color functions
# ================
def _adjust_color(
        color,
        saturation=0.0,
        lightness=0.0,
        red=0.0,
        green=0.0,
        blue=0.0,
        alpha=0.0,
        **kwargs):
    """ Add the given offsets to the corresponding channels of *color*.
    Channels left at 0.0 are not touched (see __asc_color).
    """
    return __asc_color(
        OPRT['+'],
        color,
        saturation,
        lightness,
        red,
        green,
        blue,
        alpha)


def _scale_color(
        color,
        saturation=1.0,
        lightness=1.0,
        red=1.0,
        green=1.0,
        blue=1.0,
        alpha=1.0,
        **kwargs):
    """ Multiply the corresponding channels of *color* by the given factors.
    """
    return __asc_color(
        OPRT['*'],
        color,
        saturation,
        lightness,
        red,
        green,
        blue,
        alpha)


def _change_color(
        color,
        saturation=None,
        lightness=None,
        red=None,
        green=None,
        blue=None,
        alpha=None,
        **kwargs):
    """ Replace the given channels of *color* outright (operator is None,
    so hsl_op/rgba_op set instead of combining).
    """
    return __asc_color(
        None,
        color,
        saturation,
        lightness,
        red,
        green,
        blue,
        alpha)
def _invert(color, **kwargs):
    """ Returns the inverse (negative) of a color.
    Each RGB channel is subtracted from 255; the alpha channel is kept.
    """
    col = ColorValue(color)
    channels = [255.0 - col.value[i] for i in range(3)]
    channels.append(col.value[3])
    return ColorValue(channels)
# hsl_op wrappers exposed under the adjust-*/scale-* scss function names.
def _adjust_lightness(color, amount, **kwargs):
    # Add *amount* to the lightness channel.
    return hsl_op(OPRT['+'], color, 0, 0, amount)


def _adjust_saturation(color, amount, **kwargs):
    # Add *amount* to the saturation channel.
    return hsl_op(OPRT['+'], color, 0, amount, 0)


def _scale_lightness(color, amount, **kwargs):
    # Multiply the lightness channel by *amount*.
    return hsl_op(OPRT['*'], color, 0, 0, amount)


def _scale_saturation(color, amount, **kwargs):
    # Multiply the saturation channel by *amount*.
    return hsl_op(OPRT['*'], color, 0, amount, 0)
# Compass helpers
# ================
def _color_stops(*args, **kwargs):
    # Compass gradient helper — not implemented.
    raise NotImplementedError


def _elements_of_type(display, **kwargs):
    """ Return the comma-separated selector list of elements whose default
    display matches *display* (looked up in ELEMENTS_OF_TYPE).
    """
    return StringValue(ELEMENTS_OF_TYPE.get(StringValue(display).value, ''))


def _enumerate(s, b, e, **kwargs):
    """ Produce "s<b>, s<b+1>, ..., s<e>" (e inclusive). """
    return ', '.join(
        "%s%d" % (StringValue(s).value, x)
        for x in range(int(b.value), int(e.value + 1)))


def _font_files(*args, **kwargs):
    # Compass font helper — not implemented.
    raise NotImplementedError


def _headings(a=None, b=None, **kwargs):
    """ Return a range of heading selectors, e.g. "h1, h2, h3". """
    h = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
    if not a or StringValue(a).value == 'all':
        a, b = 1, 6
    elif b is None:
        # Single argument: headings 1 through a.
        b, a = a.value + 1, 1
    return ', '.join(h[int(float(a) - 1):int(float(b))])


def _nest(*args, **kwargs):
    """ Nest comma-separated selector lists, combining every selector of
    each argument with every selector of the next; '&' in the inner
    selector is replaced by the outer selector.
    """
    return ', '.join(
        ' '.join(s.strip() for s in p)
        if '&' not in p[1] else p[1].replace('&', p[0].strip())
        for p in product(
            *(StringValue(sel).value.split(',') for sel in args)
        )
    )
@check_pil
def _image_width(image, **kwargs):
    """ Return the pixel width of *image* (path resolved against the
    compiler's 'path' option) as a px number.
    """
    root = kwargs.get('root')
    path = os.path.abspath(
        os.path.join(
            root.get_opt('path'),
            StringValue(image).value))
    size = __get_size(path, root=root)
    return NumberValue([size[0], 'px'])


@check_pil
def _image_height(image, **kwargs):
    """ Return the pixel height of *image* (path resolved against the
    compiler's 'path' option) as a px number.
    """
    root = kwargs.get('root')
    path = os.path.abspath(
        os.path.join(
            root.get_opt('path'),
            StringValue(image).value))
    size = __get_size(path, root=root)
    return NumberValue([size[1], 'px'])
def _image_url(image, **kwargs):
    # Unwrap a (possibly quoted) image path to its raw string value.
    return QuotedStringValue(image).value
def _inline_image(image, mimetype=None, **kwargs):
    """ Embed an image file as a base64 ``data:`` URI.

    Falls back to a plain cache-busted URL (optionally warning) when the
    file does not exist.

    :param image: image path, resolved against the compiler's 'path' option.
    :param mimetype: explicit MIME type; guessed from the filename if falsy.
    :return: StringValue of the form ``url("...")``.
    """
    root = kwargs.get('root')
    path = os.path.abspath(
        os.path.join(
            root.get_opt('path'),
            StringValue(image).value))
    if os.path.exists(path):
        mimetype = StringValue(mimetype).value or mimetypes.guess_type(path)[0]
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(path, 'rb') as f:
            raw = f.read()
        if PY3:
            data = base64.b64encode(raw).decode('utf-8')
        else:
            data = base64.b64encode(raw)
        url = 'data:' + mimetype + ';base64,' + data
    else:
        if root and root.get_opt('warn'):
            warn("Not found image: %s" % path)
        url = '%s?_=NA' % QuotedStringValue(image).value
    inline = 'url("%s")' % url
    return StringValue(inline)
# Misc
# ====
def _if(cond, body, els, **kwargs):
    """Return *body* when *cond* is truthy, otherwise *els*."""
    return body if BooleanValue(cond).value else els
# The following helpers are declared so FUNCTION_LIST can reference them,
# but are not implemented yet (each silently returns None).
def _sprite_position(*args):
    pass


def _sprite_file(*args):
    pass


def _sprite(*args):
    pass


def _sprite_map(*args):
    pass


def _sprite_map_name(*args):
    pass


def _sprite_url(*args):
    pass


def _opposite_position(*args):
    pass


def _grad_point(*args):
    pass


def _grad_color_stops(*args):
    pass


def _nth(*args):
    pass


def _join(*args):
    pass


def _append(*args):
    pass
# Dispatch table mapping "name:arity" keys (":n" means variadic) to the
# implementation functions above. Several scss/Compass aliases share one
# implementation (e.g. spin -> adjust-hue, opacity -> alpha).
FUNCTION_LIST = {
    # RGB functions
    'rgb:3': _rgb,
    'rgba:4': _rgba,
    'red:1': _red,
    'green:1': _green,
    'blue:1': _blue,
    'mix:2': _mix,
    'mix:3': _mix,

    # HSL functions
    'hsl:3': _hsl,
    'hsla:4': _hsla,
    'hue:1': _hue,
    'saturation:1': _saturation,
    'lightness:1': _lightness,
    'adjust-hue:2': _adjust_hue,
    'spin:2': _adjust_hue,
    'lighten:2': _lighten,
    'darken:2': _darken,
    'saturate:2': _saturate,
    'desaturate:2': _desaturate,
    'grayscale:1': _grayscale,
    'complement:1': _complement,

    # Opacity functions
    'alpha:1': _alpha,
    'opacity:1': _alpha,
    'opacify:2': _opacify,
    'fadein:2': _opacify,
    'fade-in:2': _opacify,
    'transparentize:2': _transparentize,
    'fadeout:2': _transparentize,
    'fade-out:2': _transparentize,

    # String functions
    'quote:n': _quote,
    'unquote:n': _unquote,

    # Number functions
    'percentage:1': _percentage,
    'sin:1': _sin,
    'cos:1': _cos,
    'tan:1': _tan,
    'abs:1': _abs,
    'round:1': _round,
    'ceil:1': _ceil,
    'floor:1': _floor,
    'pi:0': _pi,

    # Introspection functions
    'type-of:1': _type_of,
    'unit:1': _unit,
    'unitless:1': _unitless,
    'comparable:2': _comparable,

    # Color functions
    'adjust-color:n': _adjust_color,
    'scale-color:n': _scale_color,
    'change-color:n': _change_color,
    'adjust-lightness:2': _adjust_lightness,
    'adjust-saturation:2': _adjust_saturation,
    'scale-lightness:2': _scale_lightness,
    'scale-saturation:2': _scale_saturation,
    'invert:1': _invert,

    # Compass helpers
    'append-selector:2': _nest,
    'color-stops:n': _color_stops,
    'enumerate:3': _enumerate,
    'elements-of-type:1': _elements_of_type,
    'font-files:n': _font_files,
    'headings:n': _headings,
    'nest:n': _nest,

    # Images functions
    'image-url:1': _image_url,
    'image-width:1': _image_width,
    'image-height:1': _image_height,
    'inline-image:1': _inline_image,
    'inline-image:2': _inline_image,

    # Not implemented
    'sprite-map:1': _sprite_map,
    'sprite:2': _sprite,
    'sprite:3': _sprite,
    'sprite:4': _sprite,
    'sprite-map-name:1': _sprite_map_name,
    'sprite-file:2': _sprite_file,
    'sprite-url:1': _sprite_url,
    'sprite-position:2': _sprite_position,
    'sprite-position:3': _sprite_position,
    'sprite-position:4': _sprite_position,
    'opposite-position:n': _opposite_position,
    'grad-point:n': _grad_point,
    'grad-color-stops:n': _grad_color_stops,
    'nth:2': _nth,
    'first-value-of:1': _nth,
    'join:2': _join,
    'join:3': _join,
    'append:2': _append,
    'append:3': _append,
    'if:3': _if,
    'escape:1': _unquote,
    'e:1': _unquote,
}
def __asc_color(op, color, saturation, lightness, red, green, blue, alpha):
    """ Apply *op* to only the color channels that were actually supplied;
    falsy (0/None) channel arguments leave that color space untouched.
    """
    if lightness or saturation:
        color = hsl_op(op, color, 0, saturation, lightness)
    if red or green or blue or alpha:
        color = rgba_op(op, color, red, green, blue, alpha)
    return color
def __get_size(path, **kwargs):
    """ Return (width, height) of the image at *path*, memoized in IMAGES.

    Returns (0, 0) — optionally warning via the 'warn' option — when the
    file does not exist.
    """
    root = kwargs.get('root')
    if path not in IMAGES:
        if not os.path.exists(path):
            if root and root.get_opt('warn'):
                warn("Not found image: %s" % path)
            return 0, 0
        image = Image.open(path)
        IMAGES[path] = image.size
    return IMAGES[path]
# pylama:ignore=F0401,D
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.