repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | NotebookExecutor.create_notebook | python | def create_notebook(self, data):
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
json=data)
self.notebook_id = r.json()['body'] | Create notebook under notebook directory. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L22-L26 | null | class NotebookExecutor():
"""NotebookExecutor is a command line tool to execute a Zeppelin notebook."""
def __init__(self, notebook_name, output_path, zeppelin_url):
"""Initialize class object with attributes based on CLI inputs."""
self.notebook_name = notebook_name
self.output_path = output_path
self.zeppelin_url = zeppelin_url
def run_notebook(self):
"""Call API to execute notebook."""
requests.post('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
def wait_for_notebook_to_execute(self):
"""Wait for notebook to finish executing before continuing."""
while True:
r = requests.get('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
try:
data = r.json()['body']
if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):
break
time.sleep(5)
continue
except KeyError as e:
print(e)
print(r.json())
elif r.status_code == 500:
print('Notebook is still busy executing. Checking again in 60 seconds...')
time.sleep(60)
continue
else:
print('ERROR: Unexpected return code: {}'.format(r.status_code))
sys.exit(1)
def get_executed_notebook(self):
"""Return the executed notebook."""
r = requests.get('http://{0}/api/notebook/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
return r.json()['body']
else:
print('ERROR: Could not get executed notebook.', file=sys.stderr)
sys.exit(1)
def save_notebook(self, body):
"""Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.')
def execute_notebook(self, data):
"""Execute input notebook and save it to file.
If no output path given, the output will be printed to stdout.
If any errors occur from executing the notebook's paragraphs, they will
be displayed in stderr.
"""
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()
err = False
output = []
for paragraph in body['paragraphs']:
if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
output.append(paragraph['results']['msg'][0]['data'])
err = True
elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
output.append(paragraph['result']['msg'])
err = True
[print(e.strip() + '\n', file=sys.stderr) for e in output if e]
if err:
sys.exit(1)
if not self.output_path:
print(json.dumps(body, indent=2))
else:
self.save_notebook(body)
|
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | NotebookExecutor.wait_for_notebook_to_execute | python | def wait_for_notebook_to_execute(self):
while True:
r = requests.get('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
try:
data = r.json()['body']
if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):
break
time.sleep(5)
continue
except KeyError as e:
print(e)
print(r.json())
elif r.status_code == 500:
print('Notebook is still busy executing. Checking again in 60 seconds...')
time.sleep(60)
continue
else:
print('ERROR: Unexpected return code: {}'.format(r.status_code))
sys.exit(1) | Wait for notebook to finish executing before continuing. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L33-L57 | null | class NotebookExecutor():
"""NotebookExecutor is a command line tool to execute a Zeppelin notebook."""
def __init__(self, notebook_name, output_path, zeppelin_url):
"""Initialize class object with attributes based on CLI inputs."""
self.notebook_name = notebook_name
self.output_path = output_path
self.zeppelin_url = zeppelin_url
def create_notebook(self, data):
"""Create notebook under notebook directory."""
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
json=data)
self.notebook_id = r.json()['body']
def run_notebook(self):
"""Call API to execute notebook."""
requests.post('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
def get_executed_notebook(self):
"""Return the executed notebook."""
r = requests.get('http://{0}/api/notebook/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
return r.json()['body']
else:
print('ERROR: Could not get executed notebook.', file=sys.stderr)
sys.exit(1)
def save_notebook(self, body):
"""Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.')
def execute_notebook(self, data):
"""Execute input notebook and save it to file.
If no output path given, the output will be printed to stdout.
If any errors occur from executing the notebook's paragraphs, they will
be displayed in stderr.
"""
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()
err = False
output = []
for paragraph in body['paragraphs']:
if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
output.append(paragraph['results']['msg'][0]['data'])
err = True
elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
output.append(paragraph['result']['msg'])
err = True
[print(e.strip() + '\n', file=sys.stderr) for e in output if e]
if err:
sys.exit(1)
if not self.output_path:
print(json.dumps(body, indent=2))
else:
self.save_notebook(body)
|
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | NotebookExecutor.get_executed_notebook | python | def get_executed_notebook(self):
r = requests.get('http://{0}/api/notebook/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
return r.json()['body']
else:
print('ERROR: Could not get executed notebook.', file=sys.stderr)
sys.exit(1) | Return the executed notebook. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L59-L67 | null | class NotebookExecutor():
"""NotebookExecutor is a command line tool to execute a Zeppelin notebook."""
def __init__(self, notebook_name, output_path, zeppelin_url):
"""Initialize class object with attributes based on CLI inputs."""
self.notebook_name = notebook_name
self.output_path = output_path
self.zeppelin_url = zeppelin_url
def create_notebook(self, data):
"""Create notebook under notebook directory."""
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
json=data)
self.notebook_id = r.json()['body']
def run_notebook(self):
"""Call API to execute notebook."""
requests.post('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
def wait_for_notebook_to_execute(self):
"""Wait for notebook to finish executing before continuing."""
while True:
r = requests.get('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
try:
data = r.json()['body']
if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):
break
time.sleep(5)
continue
except KeyError as e:
print(e)
print(r.json())
elif r.status_code == 500:
print('Notebook is still busy executing. Checking again in 60 seconds...')
time.sleep(60)
continue
else:
print('ERROR: Unexpected return code: {}'.format(r.status_code))
sys.exit(1)
def save_notebook(self, body):
"""Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.')
def execute_notebook(self, data):
"""Execute input notebook and save it to file.
If no output path given, the output will be printed to stdout.
If any errors occur from executing the notebook's paragraphs, they will
be displayed in stderr.
"""
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()
err = False
output = []
for paragraph in body['paragraphs']:
if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
output.append(paragraph['results']['msg'][0]['data'])
err = True
elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
output.append(paragraph['result']['msg'])
err = True
[print(e.strip() + '\n', file=sys.stderr) for e in output if e]
if err:
sys.exit(1)
if not self.output_path:
print(json.dumps(body, indent=2))
else:
self.save_notebook(body)
|
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | NotebookExecutor.save_notebook | python | def save_notebook(self, body):
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.') | Save notebook depending on user provided output path. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L69-L79 | null | class NotebookExecutor():
"""NotebookExecutor is a command line tool to execute a Zeppelin notebook."""
def __init__(self, notebook_name, output_path, zeppelin_url):
"""Initialize class object with attributes based on CLI inputs."""
self.notebook_name = notebook_name
self.output_path = output_path
self.zeppelin_url = zeppelin_url
def create_notebook(self, data):
"""Create notebook under notebook directory."""
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
json=data)
self.notebook_id = r.json()['body']
def run_notebook(self):
"""Call API to execute notebook."""
requests.post('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
def wait_for_notebook_to_execute(self):
"""Wait for notebook to finish executing before continuing."""
while True:
r = requests.get('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
try:
data = r.json()['body']
if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):
break
time.sleep(5)
continue
except KeyError as e:
print(e)
print(r.json())
elif r.status_code == 500:
print('Notebook is still busy executing. Checking again in 60 seconds...')
time.sleep(60)
continue
else:
print('ERROR: Unexpected return code: {}'.format(r.status_code))
sys.exit(1)
def get_executed_notebook(self):
"""Return the executed notebook."""
r = requests.get('http://{0}/api/notebook/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
return r.json()['body']
else:
print('ERROR: Could not get executed notebook.', file=sys.stderr)
sys.exit(1)
def execute_notebook(self, data):
"""Execute input notebook and save it to file.
If no output path given, the output will be printed to stdout.
If any errors occur from executing the notebook's paragraphs, they will
be displayed in stderr.
"""
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()
err = False
output = []
for paragraph in body['paragraphs']:
if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
output.append(paragraph['results']['msg'][0]['data'])
err = True
elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
output.append(paragraph['result']['msg'])
err = True
[print(e.strip() + '\n', file=sys.stderr) for e in output if e]
if err:
sys.exit(1)
if not self.output_path:
print(json.dumps(body, indent=2))
else:
self.save_notebook(body)
|
mozilla/python-zeppelin | zeppelin/executors/notebook_executor.py | NotebookExecutor.execute_notebook | python | def execute_notebook(self, data):
self.create_notebook(data)
self.run_notebook()
self.wait_for_notebook_to_execute()
body = self.get_executed_notebook()
err = False
output = []
for paragraph in body['paragraphs']:
if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':
output.append(paragraph['results']['msg'][0]['data'])
err = True
elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':
output.append(paragraph['result']['msg'])
err = True
[print(e.strip() + '\n', file=sys.stderr) for e in output if e]
if err:
sys.exit(1)
if not self.output_path:
print(json.dumps(body, indent=2))
else:
self.save_notebook(body) | Execute input notebook and save it to file.
If no output path given, the output will be printed to stdout.
If any errors occur from executing the notebook's paragraphs, they will
be displayed in stderr. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/executors/notebook_executor.py#L81-L113 | [
"def create_notebook(self, data):\n \"\"\"Create notebook under notebook directory.\"\"\"\n r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),\n json=data)\n self.notebook_id = r.json()['body']\n",
"def run_notebook(self):\n \"\"\"Call API to execute notebook.\"\"\"\n requests.post('http://{0}/api/notebook/job/{1}'.format(\n self.zeppelin_url, self.notebook_id))\n",
"def wait_for_notebook_to_execute(self):\n \"\"\"Wait for notebook to finish executing before continuing.\"\"\"\n while True:\n r = requests.get('http://{0}/api/notebook/job/{1}'.format(\n self.zeppelin_url, self.notebook_id))\n\n if r.status_code == 200:\n try:\n data = r.json()['body']\n if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):\n break\n time.sleep(5)\n continue\n except KeyError as e:\n print(e)\n print(r.json())\n\n elif r.status_code == 500:\n print('Notebook is still busy executing. Checking again in 60 seconds...')\n time.sleep(60)\n continue\n\n else:\n print('ERROR: Unexpected return code: {}'.format(r.status_code))\n sys.exit(1)\n",
"def get_executed_notebook(self):\n \"\"\"Return the executed notebook.\"\"\"\n r = requests.get('http://{0}/api/notebook/{1}'.format(\n self.zeppelin_url, self.notebook_id))\n if r.status_code == 200:\n return r.json()['body']\n else:\n print('ERROR: Could not get executed notebook.', file=sys.stderr)\n sys.exit(1)\n",
"def save_notebook(self, body):\n \"\"\"Save notebook depending on user provided output path.\"\"\"\n directory = os.path.dirname(self.output_path)\n full_path = os.path.join(directory, self.notebook_name)\n try:\n with open(full_path, 'w') as fh:\n fh.write(json.dumps(body, indent=2))\n except ValueError:\n print('ERROR: Could not save executed notebook to path: ' +\n self.output_path +\n ' -- Please provide a valid absolute path.')\n"
] | class NotebookExecutor():
"""NotebookExecutor is a command line tool to execute a Zeppelin notebook."""
def __init__(self, notebook_name, output_path, zeppelin_url):
"""Initialize class object with attributes based on CLI inputs."""
self.notebook_name = notebook_name
self.output_path = output_path
self.zeppelin_url = zeppelin_url
def create_notebook(self, data):
"""Create notebook under notebook directory."""
r = requests.post('http://{0}/api/notebook'.format(self.zeppelin_url),
json=data)
self.notebook_id = r.json()['body']
def run_notebook(self):
"""Call API to execute notebook."""
requests.post('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
def wait_for_notebook_to_execute(self):
"""Wait for notebook to finish executing before continuing."""
while True:
r = requests.get('http://{0}/api/notebook/job/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
try:
data = r.json()['body']
if all(paragraph['status'] in ['FINISHED', 'ERROR'] for paragraph in data):
break
time.sleep(5)
continue
except KeyError as e:
print(e)
print(r.json())
elif r.status_code == 500:
print('Notebook is still busy executing. Checking again in 60 seconds...')
time.sleep(60)
continue
else:
print('ERROR: Unexpected return code: {}'.format(r.status_code))
sys.exit(1)
def get_executed_notebook(self):
"""Return the executed notebook."""
r = requests.get('http://{0}/api/notebook/{1}'.format(
self.zeppelin_url, self.notebook_id))
if r.status_code == 200:
return r.json()['body']
else:
print('ERROR: Could not get executed notebook.', file=sys.stderr)
sys.exit(1)
def save_notebook(self, body):
"""Save notebook depending on user provided output path."""
directory = os.path.dirname(self.output_path)
full_path = os.path.join(directory, self.notebook_name)
try:
with open(full_path, 'w') as fh:
fh.write(json.dumps(body, indent=2))
except ValueError:
print('ERROR: Could not save executed notebook to path: ' +
self.output_path +
' -- Please provide a valid absolute path.')
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_header | python | def build_header(self, title):
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out | Generate the header for the Markdown file. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L41-L53 | null | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_code | python | def build_code(self, lang, body):
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```") | Wrap text with markdown specific flavour. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L60-L64 | [
"def build_markdown(self, lang, body):\n \"\"\"Append paragraphs body to output string.\"\"\"\n if body is not None:\n self.out.append(body)\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.process_input | python | def process_input(self, paragraph):
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body) | Parse paragraph for the language of the code and the code itself. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L66-L83 | [
"def build_code(self, lang, body):\n \"\"\"Wrap text with markdown specific flavour.\"\"\"\n self.out.append(\"```\" + lang)\n self.build_markdown(lang, body)\n self.out.append(\"```\")\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.create_md_row | python | def create_md_row(self, row, header=False):
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md) | Translate row into markdown format. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L85-L104 | null | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.process_date_created | python | def process_date_created(self, text):
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date | Set date_created to the oldest date (date created). | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L106-L112 | null | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.process_date_updated | python | def process_date_updated(self, text):
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date | Set date_updated to the most recent date (updated date). | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L114-L120 | null | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_output | python | def build_output(self, fout):
fout.write('\n'.join([s for s in self.out])) | Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L129-L135 | null | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.convert | python | def convert(self, json, fout):
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) | Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L137-L145 | [
"def build_header(self, title):\n \"\"\"Generate the header for the Markdown file.\"\"\"\n header = ['---',\n 'title: ' + title,\n 'author(s): ' + self.user,\n 'tags: ',\n 'created_at: ' + str(self.date_created),\n 'updated_at: ' + str(self.date_updated),\n 'tldr: ',\n 'thumbnail: ',\n '---']\n\n self.out = header + self.out\n",
"def build_output(self, fout):\n \"\"\"Squash self.out into string.\n\n Join every line in self.out with a new line and write the\n result to the output file.\n \"\"\"\n fout.write('\\n'.join([s for s in self.out]))\n",
"def build_markdown_body(self, text):\n \"\"\"Generate the body for the Markdown file.\n\n - processes each json block one by one\n - for each block, process:\n - the creator of the notebook (user)\n - the date the notebook was created\n - the date the notebook was last updated\n - the input by detecting the editor language\n - the output by detecting the output format\n \"\"\"\n key_options = {\n 'dateCreated': self.process_date_created,\n 'dateUpdated': self.process_date_updated,\n 'title': self.process_title,\n 'text': self.process_input\n }\n\n for paragraph in text['paragraphs']:\n if 'user' in paragraph:\n self.user = paragraph['user']\n\n for key, handler in key_options.items():\n if key in paragraph:\n handler(paragraph[key])\n\n if self._RESULT_KEY in paragraph:\n self.process_results(paragraph)\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
# write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_markdown_body | python | def build_markdown_body(self, text):
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph) | Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L147-L174 | [
"def process_results(self, paragraph):\n \"\"\"Route Zeppelin output types to corresponding handlers.\"\"\"\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_table | python | def build_table(self, msg):
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row) | Format each row of the table. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L180-L187 | [
"def create_md_row(self, row, header=False):\n \"\"\"Translate row into markdown format.\"\"\"\n if not row:\n return\n cols = row.split('\\t')\n if len(cols) == 1:\n self.out.append(cols[0])\n else:\n col_md = '|'\n underline_md = '|'\n\n if cols:\n for col in cols:\n col_md += col + '|'\n underline_md += '-|'\n\n if header:\n self.out.append(col_md + '\\n' + underline_md)\n else:\n self.out.append(col_md)\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_image(self, msg):
"""Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory.
"""
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index))
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | MarkdownConverter.build_image | python | def build_image(self, msg):
result = self.find_message(msg)
if result is None:
return
self.index += 1
images_path = 'images'
if self.directory:
images_path = os.path.join(self.directory, images_path)
if not os.path.isdir(images_path):
os.makedirs(images_path)
with open('{0}/output_{1}.png'.format(images_path, self.index), 'wb') as fh:
self.write_image_to_disk(msg, result, fh)
self.out.append(
'\n\n'.format(images_path, self.index)) | Convert base64 encoding to png.
Strips msg of the base64 image encoding and outputs
the images to the specified directory. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L189-L213 | [
"def find_message(self, msg):\n \"\"\"Use regex to find encoded image.\"\"\"\n",
"def write_image_to_disk(self, msg, result, fh):\n \"\"\"Decode message to PNG and write to disk.\"\"\"\n"
] | class MarkdownConverter(abc.ABC):
"""ZeppelinConverter is a utility to convert Zeppelin raw json into Markdown."""
@abc.abstractproperty
def _RESULT_KEY(self):
pass
def __init__(self, input_filename, output_filename, directory, user='anonymous',
date_created='N/A', date_updated='N/A'):
"""Initialize class object with attributes based on CLI inputs."""
self.index = 0
self.input_filename = input_filename
self.output_filename = output_filename
self.directory = directory
self.user = user
self.date_created = date_created
self.date_updated = date_updated
self.out = []
# To add support for other output types, add the file type to
# the dictionary and create the necessary function to handle it.
self.output_options = {
'HTML': self.build_image,
'TEXT': self.build_text,
'TABLE': self.build_table
}
def build_header(self, title):
"""Generate the header for the Markdown file."""
header = ['---',
'title: ' + title,
'author(s): ' + self.user,
'tags: ',
'created_at: ' + str(self.date_created),
'updated_at: ' + str(self.date_updated),
'tldr: ',
'thumbnail: ',
'---']
self.out = header + self.out
def build_markdown(self, lang, body):
"""Append paragraphs body to output string."""
if body is not None:
self.out.append(body)
def build_code(self, lang, body):
"""Wrap text with markdown specific flavour."""
self.out.append("```" + lang)
self.build_markdown(lang, body)
self.out.append("```")
def process_input(self, paragraph):
"""Parse paragraph for the language of the code and the code itself."""
try:
lang, body = paragraph.split(None, 1)
except ValueError:
lang, body = paragraph, None
if not lang.strip().startswith('%'):
lang = 'scala'
body = paragraph.strip()
else:
lang = lang.strip()[1:]
if lang == 'md':
self.build_markdown(lang, body)
else:
self.build_code(lang, body)
def create_md_row(self, row, header=False):
"""Translate row into markdown format."""
if not row:
return
cols = row.split('\t')
if len(cols) == 1:
self.out.append(cols[0])
else:
col_md = '|'
underline_md = '|'
if cols:
for col in cols:
col_md += col + '|'
underline_md += '-|'
if header:
self.out.append(col_md + '\n' + underline_md)
else:
self.out.append(col_md)
def process_date_created(self, text):
"""Set date_created to the oldest date (date created)."""
date = parse(text)
if self.date_created == 'N/A':
self.date_created = date
if date < self.date_created:
self.date_created = date
def process_date_updated(self, text):
"""Set date_updated to the most recent date (updated date)."""
date = parse(text)
if self.date_updated == 'N/A':
self.date_updated = date
if date > self.date_updated:
self.date_updated = date
def process_title(self, text):
"""Append hashtags before the title.
This is done to bold the title in markdown.
"""
self.out.append('#### ' + text)
def build_output(self, fout):
"""Squash self.out into string.
Join every line in self.out with a new line and write the
result to the output file.
"""
fout.write('\n'.join([s for s in self.out]))
def convert(self, json, fout):
"""Convert json to markdown.
Takes in a .json file as input and convert it to Markdown format,
saving the generated .png images into ./images.
"""
self.build_markdown_body(json) # create the body
self.build_header(json['name']) # create the md header
self.build_output(fout) # write body and header to output file
def build_markdown_body(self, text):
"""Generate the body for the Markdown file.
- processes each json block one by one
- for each block, process:
- the creator of the notebook (user)
- the date the notebook was created
- the date the notebook was last updated
- the input by detecting the editor language
- the output by detecting the output format
"""
key_options = {
'dateCreated': self.process_date_created,
'dateUpdated': self.process_date_updated,
'title': self.process_title,
'text': self.process_input
}
for paragraph in text['paragraphs']:
if 'user' in paragraph:
self.user = paragraph['user']
for key, handler in key_options.items():
if key in paragraph:
handler(paragraph[key])
if self._RESULT_KEY in paragraph:
self.process_results(paragraph)
def build_text(self, msg):
"""Add text to output array."""
self.out.append(msg)
def build_table(self, msg):
"""Format each row of the table."""
rows = msg.split('\n')
if rows:
header_row, *body_rows = rows
self.create_md_row(header_row, True)
for row in body_rows:
self.create_md_row(row)
@abc.abstractmethod
def find_message(self, msg):
"""Use regex to find encoded image."""
@abc.abstractmethod
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
@abc.abstractmethod
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | LegacyConverter.write_image_to_disk | python | def write_image_to_disk(self, msg, result, fh):
cairosvg.svg2png(bytestring=msg.encode('utf-8'), write_to=fh) | Decode message to PNG and write to disk. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L237-L239 | null | class LegacyConverter(MarkdownConverter):
"""LegacyConverter converts Zeppelin version 0.6.2 notebooks to Markdown."""
_RESULT_KEY = 'result'
def find_message(self, msg):
"""Use regex to find encoded image."""
return re.search('xml version', msg)
def process_results(self, paragraph):
"""Route Zeppelin output types to corresponding handlers."""
if 'result' in paragraph and paragraph['result']['msg']:
msg = paragraph['result']['msg']
self.output_options[paragraph['result']['type']](msg)
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | LegacyConverter.process_results | python | def process_results(self, paragraph):
if 'result' in paragraph and paragraph['result']['msg']:
msg = paragraph['result']['msg']
self.output_options[paragraph['result']['type']](msg) | Route Zeppelin output types to corresponding handlers. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L241-L245 | null | class LegacyConverter(MarkdownConverter):
"""LegacyConverter converts Zeppelin version 0.6.2 notebooks to Markdown."""
_RESULT_KEY = 'result'
def find_message(self, msg):
"""Use regex to find encoded image."""
return re.search('xml version', msg)
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
cairosvg.svg2png(bytestring=msg.encode('utf-8'), write_to=fh)
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | NewConverter.write_image_to_disk | python | def write_image_to_disk(self, msg, result, fh):
fh.write(base64.b64decode(result.group(1).encode('utf-8'))) | Decode message to PNG and write to disk. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L257-L259 | null | class NewConverter(MarkdownConverter):
"""NewConverter converts Zeppelin version 0.7.1 notebooks to Markdown."""
_RESULT_KEY = 'results'
def find_message(self, msg):
"""Use regex to find encoded image."""
return re.search('base64,(.*?)"', msg)
def process_results(self, paragraph):
"""Routes Zeppelin output types to corresponding handlers."""
if 'editorMode' in paragraph['config']:
mode = paragraph['config']['editorMode'].split('/')[-1]
if 'results' in paragraph and paragraph['results']['msg']:
msg = paragraph['results']['msg'][0]
if mode not in ('text', 'markdown'):
self.output_options[msg['type']](msg['data'])
|
mozilla/python-zeppelin | zeppelin/converters/markdown.py | NewConverter.process_results | python | def process_results(self, paragraph):
if 'editorMode' in paragraph['config']:
mode = paragraph['config']['editorMode'].split('/')[-1]
if 'results' in paragraph and paragraph['results']['msg']:
msg = paragraph['results']['msg'][0]
if mode not in ('text', 'markdown'):
self.output_options[msg['type']](msg['data']) | Routes Zeppelin output types to corresponding handlers. | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/converters/markdown.py#L261-L268 | null | class NewConverter(MarkdownConverter):
"""NewConverter converts Zeppelin version 0.7.1 notebooks to Markdown."""
_RESULT_KEY = 'results'
def find_message(self, msg):
"""Use regex to find encoded image."""
return re.search('base64,(.*?)"', msg)
def write_image_to_disk(self, msg, result, fh):
"""Decode message to PNG and write to disk."""
fh.write(base64.b64decode(result.group(1).encode('utf-8')))
|
mozilla/python-zeppelin | zeppelin/cli/execute.py | main | python | def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='path_to_notebook_json', required=True,
help='Zeppelin notebook input file (.json)')
parser.add_argument('-o', dest='output_path', default=sys.stdout,
help='Path to save rendered output file (.json) (optional)')
parser.add_argument('-u', dest='zeppelin_url', default='localhost:8890',
help='Zeppelin URL (optional)')
args = parser.parse_args()
with open(args.path_to_notebook_json, 'rb') as notebook:
try:
t = json.load(notebook)
notebook_name = os.path.basename(args.path_to_notebook_json)
if args.output_path is sys.stdout:
args.output_path = ''
elif not os.path.isdir(args.output_path):
raise ValueError('Output path given is not valid directory.')
output_path = os.path.join(args.output_path, '')
notebook_executor = NotebookExecutor(notebook_name, output_path,
args.zeppelin_url)
notebook_executor.execute_notebook(t)
except ValueError as err:
print(err)
sys.exit(1) | Entry point.
- Execute notebook
- Save output to either file or display it in stderr
- Display errors during the run if they exist | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/cli/execute.py#L13-L45 | [
"def execute_notebook(self, data):\n \"\"\"Execute input notebook and save it to file.\n\n If no output path given, the output will be printed to stdout.\n\n If any errors occur from executing the notebook's paragraphs, they will\n be displayed in stderr.\n \"\"\"\n self.create_notebook(data)\n self.run_notebook()\n self.wait_for_notebook_to_execute()\n body = self.get_executed_notebook()\n\n err = False\n output = []\n for paragraph in body['paragraphs']:\n if 'results' in paragraph and paragraph['results']['code'] == 'ERROR':\n output.append(paragraph['results']['msg'][0]['data'])\n err = True\n\n elif 'result' in paragraph and paragraph['result']['code'] == 'ERROR':\n output.append(paragraph['result']['msg'])\n err = True\n\n [print(e.strip() + '\\n', file=sys.stderr) for e in output if e]\n\n if err:\n sys.exit(1)\n\n if not self.output_path:\n print(json.dumps(body, indent=2))\n else:\n self.save_notebook(body)\n"
] | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import argparse
import json
import sys
from ..executors.notebook_executor import NotebookExecutor
if __name__ == '__main__':
main()
|
mozilla/python-zeppelin | zeppelin/cli/convert.py | main | python | def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', dest='in_filename', required=True,
help='Zeppelin notebook input file (.json)')
parser.add_argument('-o', dest='out_filename',
help='Markdown output file (.md) (optional)')
args = parser.parse_args()
directory = ''
if args.out_filename:
directory = os.path.dirname(args.out_filename)
args.out_filename = os.path.basename(args.out_filename)
args.out_filename = os.path.splitext(args.out_filename)[0]
args.out_filename = args.out_filename if args.out_filename else 'knowledge'
else:
args.out_filename = 'knowledge'
with open(args.in_filename, 'rb') as raw:
try:
t = json.load(raw)
full_path = os.path.join(directory, args.out_filename + '.md')
except ValueError:
print('ERROR: Invalid JSON format')
sys.exit(1)
version = get_version(t)
if version == '0.7.1':
zeppelin_converter = NewConverter(args.in_filename, args.out_filename,
directory)
elif version == '0.6.2':
zeppelin_converter = LegacyConverter(args.in_filename, args.out_filename,
directory)
with open(full_path, 'w') as fout:
zeppelin_converter.convert(t, fout) | Entry point.
- Loads in Zeppelin notebook
- Gets the version of the notebook
- Converts it into markdown format | train | https://github.com/mozilla/python-zeppelin/blob/76ce6b7608ef6cf7b807bd5d850a58ea6a59ef07/zeppelin/cli/convert.py#L22-L62 | [
"def get_version(text):\n \"\"\"Return correct version of Zeppelin file based on JSON format.\"\"\"\n if 'results' in text['paragraphs'][0]:\n return '0.7.1'\n else:\n return '0.6.2'\n",
"def convert(self, json, fout):\n \"\"\"Convert json to markdown.\n\n Takes in a .json file as input and convert it to Markdown format,\n saving the generated .png images into ./images.\n \"\"\"\n self.build_markdown_body(json) # create the body\n self.build_header(json['name']) # create the md header\n self.build_output(fout) # write body and header to output file\n"
] | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import argparse
import json
import sys
from ..converters.markdown import NewConverter
from ..converters.markdown import LegacyConverter
def get_version(text):
"""Return correct version of Zeppelin file based on JSON format."""
if 'results' in text['paragraphs'][0]:
return '0.7.1'
else:
return '0.6.2'
if __name__ == '__main__':
main()
|
zebpalmer/WeatherAlerts | weatheralerts/alert.py | _ts_parse | python | def _ts_parse(ts):
dt = datetime.strptime(ts[:19],"%Y-%m-%dT%H:%M:%S")
if ts[19] == '+':
dt -= timedelta(hours=int(ts[20:22]),minutes=int(ts[23:]))
elif ts[19] == '-':
dt += timedelta(hours=int(ts[20:22]),minutes=int(ts[23:]))
return dt.replace(tzinfo=pytz.UTC) | Parse alert timestamp, return UTC datetime object to maintain Python 2 compatibility. | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/alert.py#L4-L11 | null | import pytz
from datetime import datetime, timedelta
class Alert(object):
"""
Create an alert object with the cap dict created from cap xml parser.
This object won't be pretty... it's mostly a bunch of property methods to
sanitize and muck around with the raw cap data. Using individual properties
and methods instead of a special getattr so that we can more easily standardize
the Alert API. This may be revisted in the future as the project becomes more
stable.
"""
def __init__(self, cap_dict):
self._raw = cap_dict
@property
def _serialized(self):
"""Provides a sanitized & serializeable dict of the alert mainly for forward & backwards compatibility"""
return {'title': self.title,
'summary': self.summary,
'areadesc': self.areadesc,
'event': self.event,
'samecodes': self.samecodes,
'zonecodes': self.zonecodes,
'expiration': self.expiration,
'updated': self.updated,
'effective': self.effective,
'published': self.published,
'severity': self.severity,
'category': self.category,
'urgency': self.urgency,
'msgtype': self.msgtype,
'link': self.link,
}
@property
def title(self):
"""Alert title"""
return self._raw['title']
@property
def summary(self):
"""Alert summary"""
return self._raw['summary']
@property
def areadesc(self):
"""A more generic area description"""
return self._raw['cap:areaDesc']
@property
def event(self):
"""alert event type"""
return self._raw['cap:event']
@property
def samecodes(self):
"""samecodes for the alert area"""
return self._raw['samecodes']
@property
def zonecodes(self):
"""UCG codes for the alert area (these are sometimes referred to as county codes,
but that's not quite accurate)"""
try:
return self._raw['UCG']
except Exception:
return []
@property
def expiration(self):
"""Expiration of the alert (datetime object)"""
ts = _ts_parse(self._raw['cap:expires'])
return ts
@property
def updated(self):
"""Last update to the alert (datetime object)"""
ts = _ts_parse(self._raw['updated'])
return ts
@property
def effective(self):
"""Effective timestamp of the alert (datetime object)"""
ts = _ts_parse(self._raw['cap:effective'])
return ts
@property
def published(self):
"""Published timestamp of the alert (datetime object)"""
ts = _ts_parse(self._raw['published'])
return ts
@property
def severity(self):
"""Severity of alert i.e. minor, major, etc"""
return self._raw['cap:severity']
@property
def category(self):
"""Category of alert i.e. Met, Civil, etc"""
return self._raw['cap:category']
@property
def urgency(self):
"""Alert urgency"""
return self._raw['cap:urgency']
@property
def msgtype(self):
return self._raw['cap:msgType']
@property
def link(self):
return self._raw['id']
|
zebpalmer/WeatherAlerts | weatheralerts/alert.py | Alert._serialized | python | def _serialized(self):
return {'title': self.title,
'summary': self.summary,
'areadesc': self.areadesc,
'event': self.event,
'samecodes': self.samecodes,
'zonecodes': self.zonecodes,
'expiration': self.expiration,
'updated': self.updated,
'effective': self.effective,
'published': self.published,
'severity': self.severity,
'category': self.category,
'urgency': self.urgency,
'msgtype': self.msgtype,
'link': self.link,
} | Provides a sanitized & serializeable dict of the alert mainly for forward & backwards compatibility | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/alert.py#L29-L46 | null | class Alert(object):
"""
Create an alert object with the cap dict created from cap xml parser.
This object won't be pretty... it's mostly a bunch of property methods to
sanitize and muck around with the raw cap data. Using individual properties
and methods instead of a special getattr so that we can more easily standardize
the Alert API. This may be revisted in the future as the project becomes more
stable.
"""
def __init__(self, cap_dict):
self._raw = cap_dict
@property
@property
def title(self):
"""Alert title"""
return self._raw['title']
@property
def summary(self):
"""Alert summary"""
return self._raw['summary']
@property
def areadesc(self):
"""A more generic area description"""
return self._raw['cap:areaDesc']
@property
def event(self):
"""alert event type"""
return self._raw['cap:event']
@property
def samecodes(self):
"""samecodes for the alert area"""
return self._raw['samecodes']
@property
def zonecodes(self):
"""UCG codes for the alert area (these are sometimes referred to as county codes,
but that's not quite accurate)"""
try:
return self._raw['UCG']
except Exception:
return []
@property
def expiration(self):
"""Expiration of the alert (datetime object)"""
ts = _ts_parse(self._raw['cap:expires'])
return ts
@property
def updated(self):
"""Last update to the alert (datetime object)"""
ts = _ts_parse(self._raw['updated'])
return ts
@property
def effective(self):
"""Effective timestamp of the alert (datetime object)"""
ts = _ts_parse(self._raw['cap:effective'])
return ts
@property
def published(self):
"""Published timestamp of the alert (datetime object)"""
ts = _ts_parse(self._raw['published'])
return ts
@property
def severity(self):
"""Severity of alert i.e. minor, major, etc"""
return self._raw['cap:severity']
@property
def category(self):
"""Category of alert i.e. Met, Civil, etc"""
return self._raw['cap:category']
@property
def urgency(self):
"""Alert urgency"""
return self._raw['cap:urgency']
@property
def msgtype(self):
return self._raw['cap:msgType']
@property
def link(self):
return self._raw['id']
|
zebpalmer/WeatherAlerts | weatheralerts/feed.py | AlertsFeed._get_feed_cache | python | def _get_feed_cache(self):
feed_cache = None
if os.path.exists(self._feed_cache_file):
maxage = datetime.now() - timedelta(minutes=self._cachetime)
file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)
if file_ts > maxage:
try:
with open(self._feed_cache_file, 'rb') as cache:
feed_cache = cache.read()
finally:
pass
return feed_cache | If a recent cache exists, return it, else return None | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/feed.py#L24-L36 | null | class AlertsFeed(object):
"""Fetch the NWS CAP/XML Alerts feed for the US or a single state if requested
if an instance of the GeoDB class has already been created, you can pass that
as well to save some processing
This will cache the feed (in local tempdir) for up to 'maxage' minutes"""
def __init__(self, state='US', maxage=3):
self._alerts = ''
self._feedstatus = ''
self._cachetime = maxage
self._state = state
self._cachedir = str(tempfile.gettempdir()) + '/'
self._feed_cache_file = self._cachedir + 'nws_alerts_py{0}_{1}.cache'.format(sys.version_info[0], self._state)
self._cachetime = 3
self._raw = None
def raw_cap(self, refresh=False):
"""
Raw xml(cap) of the the feed. If a valid cache is available
it is used, else a new copy of the feed is grabbed
Note: you can force refresh here, if you do, don't also manually call refresh
"""
if refresh is True:
self._raw = self.refresh()
if self._raw is None:
self._raw = self._get_feed_cache()
if self._raw is None:
self._raw = self.refresh()
return self._raw
def refresh(self):
"""
NOTE: You probably don't want to call this... This does not update the alerts loaded
in the WeatherAlerts object, only the underlying feed. This is only used internally now and as such,
will likely be deprecated soon. Please call `WeatherAlerts.refresh()` instead.
"""
self._raw = self._get_nws_feed()
self._save_feed_cache(self._raw)
return self._raw
def _get_nws_feed(self):
"""get nws alert feed, and cache it"""
url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())
# pylint: disable=E1103
xml = requests.get(url).content
return xml
def _save_feed_cache(self, raw_feed):
with open(self._feed_cache_file, 'wb') as cache:
cache.write(raw_feed)
|
zebpalmer/WeatherAlerts | weatheralerts/feed.py | AlertsFeed.raw_cap | python | def raw_cap(self, refresh=False):
if refresh is True:
self._raw = self.refresh()
if self._raw is None:
self._raw = self._get_feed_cache()
if self._raw is None:
self._raw = self.refresh()
return self._raw | Raw xml(cap) of the the feed. If a valid cache is available
it is used, else a new copy of the feed is grabbed
Note: you can force refresh here, if you do, don't also manually call refresh | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/feed.py#L38-L50 | [
"def _get_feed_cache(self):\n \"\"\"If a recent cache exists, return it, else return None\"\"\"\n feed_cache = None\n if os.path.exists(self._feed_cache_file):\n maxage = datetime.now() - timedelta(minutes=self._cachetime)\n file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)\n if file_ts > maxage:\n try:\n with open(self._feed_cache_file, 'rb') as cache:\n feed_cache = cache.read()\n finally:\n pass\n return feed_cache\n",
"def refresh(self):\n \"\"\"\n NOTE: You probably don't want to call this... This does not update the alerts loaded\n in the WeatherAlerts object, only the underlying feed. This is only used internally now and as such,\n will likely be deprecated soon. Please call `WeatherAlerts.refresh()` instead.\n \"\"\"\n self._raw = self._get_nws_feed()\n self._save_feed_cache(self._raw)\n return self._raw\n"
] | class AlertsFeed(object):
"""Fetch the NWS CAP/XML Alerts feed for the US or a single state if requested
if an instance of the GeoDB class has already been created, you can pass that
as well to save some processing
This will cache the feed (in local tempdir) for up to 'maxage' minutes"""
def __init__(self, state='US', maxage=3):
self._alerts = ''
self._feedstatus = ''
self._cachetime = maxage
self._state = state
self._cachedir = str(tempfile.gettempdir()) + '/'
self._feed_cache_file = self._cachedir + 'nws_alerts_py{0}_{1}.cache'.format(sys.version_info[0], self._state)
self._cachetime = 3
self._raw = None
def _get_feed_cache(self):
"""If a recent cache exists, return it, else return None"""
feed_cache = None
if os.path.exists(self._feed_cache_file):
maxage = datetime.now() - timedelta(minutes=self._cachetime)
file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)
if file_ts > maxage:
try:
with open(self._feed_cache_file, 'rb') as cache:
feed_cache = cache.read()
finally:
pass
return feed_cache
def refresh(self):
"""
NOTE: You probably don't want to call this... This does not update the alerts loaded
in the WeatherAlerts object, only the underlying feed. This is only used internally now and as such,
will likely be deprecated soon. Please call `WeatherAlerts.refresh()` instead.
"""
self._raw = self._get_nws_feed()
self._save_feed_cache(self._raw)
return self._raw
def _get_nws_feed(self):
"""get nws alert feed, and cache it"""
url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())
# pylint: disable=E1103
xml = requests.get(url).content
return xml
def _save_feed_cache(self, raw_feed):
with open(self._feed_cache_file, 'wb') as cache:
cache.write(raw_feed)
|
zebpalmer/WeatherAlerts | weatheralerts/feed.py | AlertsFeed.refresh | python | def refresh(self):
self._raw = self._get_nws_feed()
self._save_feed_cache(self._raw)
return self._raw | NOTE: You probably don't want to call this... This does not update the alerts loaded
in the WeatherAlerts object, only the underlying feed. This is only used internally now and as such,
will likely be deprecated soon. Please call `WeatherAlerts.refresh()` instead. | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/feed.py#L52-L60 | [
"def _get_nws_feed(self):\n \"\"\"get nws alert feed, and cache it\"\"\"\n url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())\n # pylint: disable=E1103\n xml = requests.get(url).content\n return xml\n",
"def _save_feed_cache(self, raw_feed):\n with open(self._feed_cache_file, 'wb') as cache:\n cache.write(raw_feed)\n"
] | class AlertsFeed(object):
"""Fetch the NWS CAP/XML Alerts feed for the US or a single state if requested
if an instance of the GeoDB class has already been created, you can pass that
as well to save some processing
This will cache the feed (in local tempdir) for up to 'maxage' minutes"""
def __init__(self, state='US', maxage=3):
self._alerts = ''
self._feedstatus = ''
self._cachetime = maxage
self._state = state
self._cachedir = str(tempfile.gettempdir()) + '/'
self._feed_cache_file = self._cachedir + 'nws_alerts_py{0}_{1}.cache'.format(sys.version_info[0], self._state)
self._cachetime = 3
self._raw = None
def _get_feed_cache(self):
"""If a recent cache exists, return it, else return None"""
feed_cache = None
if os.path.exists(self._feed_cache_file):
maxage = datetime.now() - timedelta(minutes=self._cachetime)
file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)
if file_ts > maxage:
try:
with open(self._feed_cache_file, 'rb') as cache:
feed_cache = cache.read()
finally:
pass
return feed_cache
def raw_cap(self, refresh=False):
"""
Raw xml(cap) of the the feed. If a valid cache is available
it is used, else a new copy of the feed is grabbed
Note: you can force refresh here, if you do, don't also manually call refresh
"""
if refresh is True:
self._raw = self.refresh()
if self._raw is None:
self._raw = self._get_feed_cache()
if self._raw is None:
self._raw = self.refresh()
return self._raw
def _get_nws_feed(self):
"""get nws alert feed, and cache it"""
url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())
# pylint: disable=E1103
xml = requests.get(url).content
return xml
def _save_feed_cache(self, raw_feed):
with open(self._feed_cache_file, 'wb') as cache:
cache.write(raw_feed)
|
zebpalmer/WeatherAlerts | weatheralerts/feed.py | AlertsFeed._get_nws_feed | python | def _get_nws_feed(self):
url = '''http://alerts.weather.gov/cap/%s.php?x=0''' % (str(self._state).lower())
# pylint: disable=E1103
xml = requests.get(url).content
return xml | get nws alert feed, and cache it | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/feed.py#L62-L67 | null | class AlertsFeed(object):
"""Fetch the NWS CAP/XML Alerts feed for the US or a single state if requested
if an instance of the GeoDB class has already been created, you can pass that
as well to save some processing
This will cache the feed (in local tempdir) for up to 'maxage' minutes"""
def __init__(self, state='US', maxage=3):
self._alerts = ''
self._feedstatus = ''
self._cachetime = maxage
self._state = state
self._cachedir = str(tempfile.gettempdir()) + '/'
self._feed_cache_file = self._cachedir + 'nws_alerts_py{0}_{1}.cache'.format(sys.version_info[0], self._state)
self._cachetime = 3
self._raw = None
def _get_feed_cache(self):
"""If a recent cache exists, return it, else return None"""
feed_cache = None
if os.path.exists(self._feed_cache_file):
maxage = datetime.now() - timedelta(minutes=self._cachetime)
file_ts = datetime.fromtimestamp(os.stat(self._feed_cache_file).st_mtime)
if file_ts > maxage:
try:
with open(self._feed_cache_file, 'rb') as cache:
feed_cache = cache.read()
finally:
pass
return feed_cache
def raw_cap(self, refresh=False):
"""
Raw xml(cap) of the the feed. If a valid cache is available
it is used, else a new copy of the feed is grabbed
Note: you can force refresh here, if you do, don't also manually call refresh
"""
if refresh is True:
self._raw = self.refresh()
if self._raw is None:
self._raw = self._get_feed_cache()
if self._raw is None:
self._raw = self.refresh()
return self._raw
def refresh(self):
"""
NOTE: You probably don't want to call this... This does not update the alerts loaded
in the WeatherAlerts object, only the underlying feed. This is only used internally now and as such,
will likely be deprecated soon. Please call `WeatherAlerts.refresh()` instead.
"""
self._raw = self._get_nws_feed()
self._save_feed_cache(self._raw)
return self._raw
def _save_feed_cache(self, raw_feed):
with open(self._feed_cache_file, 'wb') as cache:
cache.write(raw_feed)
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.load_alerts | python | def load_alerts(self):
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts() | NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L48-L56 | [
"def get_alerts(self):\n \"\"\"\n Public method that parses\n \"\"\"\n emptyfeed = \"There are no active watches, warnings or advisories\"\n alerts = []\n if emptyfeed in str(self._raw_cap):\n pass\n else:\n main_dom = minidom.parseString(self._raw_cap)\n xml_entries = main_dom.getElementsByTagName('entry')\n # title is currently first so we can detect an empty cap feed\n\n for dom in xml_entries:\n # parse the entry to a temp 'entry' dict\n entry = self._parse_entry(dom)\n\n # perform some cleanup before creating an object\n # entry['locations'] = self.build_locations(entry) # FIXME: remove?\n entry['target_areas'] = build_target_areas(entry)\n\n alert = Alert(entry)\n alerts.append(alert)\n del entry\n del alert\n\n return alerts\n",
"def raw_cap(self, refresh=False):\n \"\"\"\n Raw xml(cap) of the the feed. If a valid cache is available\n it is used, else a new copy of the feed is grabbed\n Note: you can force refresh here, if you do, don't also manually call refresh\n \"\"\"\n if refresh is True:\n self._raw = self.refresh()\n if self._raw is None:\n self._raw = self._get_feed_cache()\n if self._raw is None:\n self._raw = self.refresh()\n return self._raw\n"
] | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def refresh(self, force=False):
"""
Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime)
"""
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts()
@property
def alerts(self):
"""returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S.
"""
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def samecode_alerts(self, samecode):
"""Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created."""
return [x for x in self._alerts if samecode in x.samecodes]
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean))
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.refresh | python | def refresh(self, force=False):
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts() | Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime) | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L58-L65 | [
"def get_alerts(self):\n \"\"\"\n Public method that parses\n \"\"\"\n emptyfeed = \"There are no active watches, warnings or advisories\"\n alerts = []\n if emptyfeed in str(self._raw_cap):\n pass\n else:\n main_dom = minidom.parseString(self._raw_cap)\n xml_entries = main_dom.getElementsByTagName('entry')\n # title is currently first so we can detect an empty cap feed\n\n for dom in xml_entries:\n # parse the entry to a temp 'entry' dict\n entry = self._parse_entry(dom)\n\n # perform some cleanup before creating an object\n # entry['locations'] = self.build_locations(entry) # FIXME: remove?\n entry['target_areas'] = build_target_areas(entry)\n\n alert = Alert(entry)\n alerts.append(alert)\n del entry\n del alert\n\n return alerts\n"
] | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts()
@property
def alerts(self):
"""returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S.
"""
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def samecode_alerts(self, samecode):
"""Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created."""
return [x for x in self._alerts if samecode in x.samecodes]
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean))
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.alerts | python | def alerts(self):
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts | returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S. | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L68-L81 | null | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts()
def refresh(self, force=False):
"""
Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime)
"""
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts()
@property
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def samecode_alerts(self, samecode):
"""Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created."""
return [x for x in self._alerts if samecode in x.samecodes]
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean))
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.samecode_alerts | python | def samecode_alerts(self, samecode):
return [x for x in self._alerts if samecode in x.samecodes] | Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created. | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L89-L92 | null | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts()
def refresh(self, force=False):
"""
Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime)
"""
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts()
@property
def alerts(self):
"""returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S.
"""
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean))
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.county_state_alerts | python | def county_state_alerts(self, county, state):
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode) | Given a county and state, return alerts | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L94-L97 | [
"def samecode_alerts(self, samecode):\n \"\"\"Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts\n object was created.\"\"\"\n return [x for x in self._alerts if samecode in x.samecodes]\n"
] | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts()
def refresh(self, force=False):
"""
Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime)
"""
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts()
@property
def alerts(self):
"""returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S.
"""
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def samecode_alerts(self, samecode):
"""Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created."""
return [x for x in self._alerts if samecode in x.samecodes]
def event_state_counties(self):
"""DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated)"""
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean))
|
zebpalmer/WeatherAlerts | weatheralerts/weather_alerts.py | WeatherAlerts.event_state_counties | python | def event_state_counties(self):
# FIXME: most of this logic should be moved to the alert instance and refactored
counties = ''
state = ''
for alert in self._alerts:
locations = []
states = []
for samecode in alert.samecodes:
county, state = self.geo.lookup_county_state(samecode)
locations.append((county, state))
if state not in states:
states.append(state)
for state in states:
counties = [x for x, y in locations if y == state]
counties_clean = str(counties).strip("[']")
print("{0}: {1} - {2}".format(alert.event, state, counties_clean)) | DEPRECATED: this will be moved elsewhere or dropped in the near future, stop using it.
Return an event type and it's state(s) and counties (consolidated) | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/weather_alerts.py#L99-L116 | null | class WeatherAlerts(object):
"""
WeatherAlerts object that controls interaction with the NWS CAP alerts feed as well as various geo data sources.
Most interaction from users, scripts, etc will be through the api provided by this `WeatherAlerts` class.
So, as we approach a more stable project, the API in this class will also become more stable.
* Defaults to National Feed, it can be quite large at times, you probably don't want to parse it very often.
* Set `state` to see all alerts on your state feed.
* For local alerts only, set `samecodes` to a single samecode string, or list of samecode strings.
* `cachetime` is set in minutes, default is 3.
"""
def __init__(self, state=None, samecodes=None, load=True, cachetime=3):
"""
WeatherAlerts Init
"""
self._alerts = None
self._feed = None
self.daemononly = True
self.geo = GeoDB()
self.state = state
self.scope = 'US'
self.cachetime = cachetime
if samecodes is None:
self.samecodes = None
elif isinstance(samecodes, str):
self.samecodes = []
self.samecodes.append(samecodes)
elif isinstance(samecodes, list):
self.samecodes = samecodes
else:
raise Exception("Samecode must be string, or list of strings")
if self.state is not None:
self.scope = self.state
elif samecodes is not None:
self.scope = self.geo.getfeedscope(self.samecodes)
if load is True:
self.load_alerts()
def load_alerts(self):
"""
NOTE: use refresh() instead of this, if you are just needing to refresh the alerts list
Gets raw xml (cap) from the Alerts feed, throws it into the parser
and ends up with a list of alerts object, which it stores to self._alerts
"""
self._feed = AlertsFeed(state=self.scope, maxage=self.cachetime)
parser = CapParser(self._feed.raw_cap(), geo=self.geo)
self._alerts = parser.get_alerts()
def refresh(self, force=False):
"""
Refresh the alerts list. set `force` to True to force pulling a new list from the NWS, otherwise
it'll only pull a new list if the cached copy is expired. (see cachetime)
"""
if force is True:
self._feed.refresh()
self._alerts = CapParser(self._feed.raw_cap(), geo=self.geo).get_alerts()
@property
def alerts(self):
"""returns the alerts list. If samecode(s) are specified when the WeatherAlerts object is created,
this will only return alerts for those samecodes. If no samecodes were given, it'll return all alerts for the
state if one was specified otherwise for the entire U.S.
"""
if self.samecodes is not None:
temp = []
for alert in self._alerts:
for code in alert.samecodes:
if code in self.samecodes:
temp.append(alert)
return temp
else:
return self._alerts
# noinspection PyProtectedMember
@property
def _serialized_alerts(self):
"""returns a list of alerts, jsonified"""
return [x._serialized for x in self.alerts]
def samecode_alerts(self, samecode):
"""Returns alerts for a ()single) SAME geocode. Only useful if you didn't specify samecodes when the WeatherAlerts
object was created."""
return [x for x in self._alerts if samecode in x.samecodes]
def county_state_alerts(self, county, state):
"""Given a county and state, return alerts"""
samecode = self.geo.lookup_samecode(county, state)
return self.samecode_alerts(samecode)
|
zebpalmer/WeatherAlerts | weatheralerts/cap.py | build_target_areas | python | def build_target_areas(entry):
target_areas = []
areas = str(entry['cap:areaDesc']).split(';')
for area in areas:
target_areas.append(area.strip())
return target_areas | Cleanup the raw target areas description string | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/cap.py#L7-L13 | null | # pylint: disable=W0403
from weatheralerts.geo import GeoDB
from weatheralerts.alert import Alert
from xml.dom import minidom
class CapParser(object):
"""
Parses the xml from the alert feed, creates and returns a list of alert objects.
FIXME: This is slow, messy, and painful to look at. I'll be totally rewriting it shortly.
"""
def __init__(self, raw_cap, geo=None):
self._raw_cap = raw_cap
if geo is not None:
self.geo = geo
else:
self.geo = GeoDB()
self.samecodes = self.geo.samecodes
self._cap_tags = ['title', 'id', 'updated', 'published', 'link', 'summary', 'cap:event', 'cap:effective',
'cap:expires', 'cap:status', 'cap:msgType', 'cap:category', 'cap:urgency', 'cap:severity',
'cap:certainty', 'cap:areaDesc', 'cap:geocode']
def get_alerts(self):
"""
Public method that parses
"""
emptyfeed = "There are no active watches, warnings or advisories"
alerts = []
if emptyfeed in str(self._raw_cap):
pass
else:
main_dom = minidom.parseString(self._raw_cap)
xml_entries = main_dom.getElementsByTagName('entry')
# title is currently first so we can detect an empty cap feed
for dom in xml_entries:
# parse the entry to a temp 'entry' dict
entry = self._parse_entry(dom)
# perform some cleanup before creating an object
# entry['locations'] = self.build_locations(entry) # FIXME: remove?
entry['target_areas'] = build_target_areas(entry)
alert = Alert(entry)
alerts.append(alert)
del entry
del alert
return alerts
def _parse_entry(self, dom):
"""Sigh...."""
entry = {}
for tag in self._cap_tags:
# we need to handle the geocodes a bit differently
if tag == 'cap:geocode':
try:
geotypes = []
# FIXME: this will parse VTEC and add it to the feed as well, that's both a feature and a bug
for item in dom.getElementsByTagName('valueName'):
geotypes.append(str(item.firstChild.data))
n = 0
for geotype in geotypes:
try:
entry[geotype] = str(dom.getElementsByTagName('value')[n].firstChild.data).split(' ')
except AttributeError:
pass
n = n + 1
finally:
try:
entry['samecodes'] = [x for x in entry['FIPS6'] if str(x).isdigit()] # handle bad nws data
except Exception:
entry['samecodes'] = []
else:
try:
entry[tag] = dom.getElementsByTagName(tag)[0].firstChild.data
except AttributeError:
entry[tag] = ''
return entry
|
zebpalmer/WeatherAlerts | weatheralerts/cap.py | CapParser.get_alerts | python | def get_alerts(self):
emptyfeed = "There are no active watches, warnings or advisories"
alerts = []
if emptyfeed in str(self._raw_cap):
pass
else:
main_dom = minidom.parseString(self._raw_cap)
xml_entries = main_dom.getElementsByTagName('entry')
# title is currently first so we can detect an empty cap feed
for dom in xml_entries:
# parse the entry to a temp 'entry' dict
entry = self._parse_entry(dom)
# perform some cleanup before creating an object
# entry['locations'] = self.build_locations(entry) # FIXME: remove?
entry['target_areas'] = build_target_areas(entry)
alert = Alert(entry)
alerts.append(alert)
del entry
del alert
return alerts | Public method that parses | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/cap.py#L35-L61 | [
"def build_target_areas(entry):\n \"\"\"Cleanup the raw target areas description string\"\"\"\n target_areas = []\n areas = str(entry['cap:areaDesc']).split(';')\n for area in areas:\n target_areas.append(area.strip())\n return target_areas\n"
] | class CapParser(object):
"""
Parses the xml from the alert feed, creates and returns a list of alert objects.
FIXME: This is slow, messy, and painful to look at. I'll be totally rewriting it shortly.
"""
def __init__(self, raw_cap, geo=None):
self._raw_cap = raw_cap
if geo is not None:
self.geo = geo
else:
self.geo = GeoDB()
self.samecodes = self.geo.samecodes
self._cap_tags = ['title', 'id', 'updated', 'published', 'link', 'summary', 'cap:event', 'cap:effective',
'cap:expires', 'cap:status', 'cap:msgType', 'cap:category', 'cap:urgency', 'cap:severity',
'cap:certainty', 'cap:areaDesc', 'cap:geocode']
def _parse_entry(self, dom):
"""Sigh...."""
entry = {}
for tag in self._cap_tags:
# we need to handle the geocodes a bit differently
if tag == 'cap:geocode':
try:
geotypes = []
# FIXME: this will parse VTEC and add it to the feed as well, that's both a feature and a bug
for item in dom.getElementsByTagName('valueName'):
geotypes.append(str(item.firstChild.data))
n = 0
for geotype in geotypes:
try:
entry[geotype] = str(dom.getElementsByTagName('value')[n].firstChild.data).split(' ')
except AttributeError:
pass
n = n + 1
finally:
try:
entry['samecodes'] = [x for x in entry['FIPS6'] if str(x).isdigit()] # handle bad nws data
except Exception:
entry['samecodes'] = []
else:
try:
entry[tag] = dom.getElementsByTagName(tag)[0].firstChild.data
except AttributeError:
entry[tag] = ''
return entry
|
zebpalmer/WeatherAlerts | weatheralerts/cap.py | CapParser._parse_entry | python | def _parse_entry(self, dom):
entry = {}
for tag in self._cap_tags:
# we need to handle the geocodes a bit differently
if tag == 'cap:geocode':
try:
geotypes = []
# FIXME: this will parse VTEC and add it to the feed as well, that's both a feature and a bug
for item in dom.getElementsByTagName('valueName'):
geotypes.append(str(item.firstChild.data))
n = 0
for geotype in geotypes:
try:
entry[geotype] = str(dom.getElementsByTagName('value')[n].firstChild.data).split(' ')
except AttributeError:
pass
n = n + 1
finally:
try:
entry['samecodes'] = [x for x in entry['FIPS6'] if str(x).isdigit()] # handle bad nws data
except Exception:
entry['samecodes'] = []
else:
try:
entry[tag] = dom.getElementsByTagName(tag)[0].firstChild.data
except AttributeError:
entry[tag] = ''
return entry | Sigh.... | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/cap.py#L63-L91 | null | class CapParser(object):
"""
Parses the xml from the alert feed, creates and returns a list of alert objects.
FIXME: This is slow, messy, and painful to look at. I'll be totally rewriting it shortly.
"""
def __init__(self, raw_cap, geo=None):
self._raw_cap = raw_cap
if geo is not None:
self.geo = geo
else:
self.geo = GeoDB()
self.samecodes = self.geo.samecodes
self._cap_tags = ['title', 'id', 'updated', 'published', 'link', 'summary', 'cap:event', 'cap:effective',
'cap:expires', 'cap:status', 'cap:msgType', 'cap:category', 'cap:urgency', 'cap:severity',
'cap:certainty', 'cap:areaDesc', 'cap:geocode']
def get_alerts(self):
"""
Public method that parses
"""
emptyfeed = "There are no active watches, warnings or advisories"
alerts = []
if emptyfeed in str(self._raw_cap):
pass
else:
main_dom = minidom.parseString(self._raw_cap)
xml_entries = main_dom.getElementsByTagName('entry')
# title is currently first so we can detect an empty cap feed
for dom in xml_entries:
# parse the entry to a temp 'entry' dict
entry = self._parse_entry(dom)
# perform some cleanup before creating an object
# entry['locations'] = self.build_locations(entry) # FIXME: remove?
entry['target_areas'] = build_target_areas(entry)
alert = Alert(entry)
alerts.append(alert)
del entry
del alert
return alerts
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | GeoDB.location_lookup | python | def location_lookup(self, req_location):
location = False
try:
location = self.samecodes[req_location['code']]
except Exception:
pass
try:
location = self.lookup_samecode(req_location['local'], req_location['state'])
except Exception:
pass
return location | returns full location given samecode or county and state. Returns False if not valid.
*currently locations are a dictionary, once other geo data is added, they will move to a location class/obj* | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L20-L35 | [
"def lookup_samecode(self, local, state):\n \"\"\"Given County, State return the SAME code for specified location. Return False if not found\"\"\"\n for location in self.samecodes:\n if state.lower() == self.samecodes[location]['state'].lower():\n if local.lower() == self.samecodes[location]['local'].lower():\n return self.samecodes[location]\n return False\n"
] | class GeoDB(object):
"""
Interact with samecodes data
will be adding additional data (zip code lookup) in the future.
"""
def __init__(self):
self.__same = SameCodes()
self.samecodes = self.__same.samecodes
def lookup_samecode(self, local, state):
"""Given County, State return the SAME code for specified location. Return False if not found"""
for location in self.samecodes:
if state.lower() == self.samecodes[location]['state'].lower():
if local.lower() == self.samecodes[location]['local'].lower():
return self.samecodes[location]
return False
def lookup_county_state(self, samecode):
"""Given a samecode, return county, state"""
location = self.samecodes[samecode]
return location['local'], location['state']
def getstate(self, geosame):
"""Given a SAME code, return the state that SAME code is in"""
state = self.samecodes[geosame]['state']
return state
def getfeedscope(self, geocodes):
"""Given multiple SAME codes, determine if they are all in one state. If so, it returns that state.
Otherwise return 'US'. This is used to determine which NWS feed needs to be parsed to get
all alerts for the requested SAME codes"""
states = self._get_states_from_samecodes(geocodes)
if len(states) >= 2:
return 'US'
else:
return states[0]
def _get_states_from_samecodes(self, geocodes):
"""Returns all states for a given list of SAME codes
*Shouldn't be used to determine feed scope, please use getfeedscope()*
"""
states = []
for code in geocodes:
if not isinstance(geocodes, list):
raise Exception("specified geocodes must be list")
try:
state = self.samecodes[code]['state']
except KeyError:
raise Exception("Samecode Not Found")
else:
if state not in states:
states.append(state)
return states
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | GeoDB.lookup_samecode | python | def lookup_samecode(self, local, state):
for location in self.samecodes:
if state.lower() == self.samecodes[location]['state'].lower():
if local.lower() == self.samecodes[location]['local'].lower():
return self.samecodes[location]
return False | Given County, State return the SAME code for specified location. Return False if not found | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L37-L43 | null | class GeoDB(object):
"""
Interact with samecodes data
will be adding additional data (zip code lookup) in the future.
"""
def __init__(self):
self.__same = SameCodes()
self.samecodes = self.__same.samecodes
def location_lookup(self, req_location):
"""
returns full location given samecode or county and state. Returns False if not valid.
*currently locations are a dictionary, once other geo data is added, they will move to a location class/obj*
"""
location = False
try:
location = self.samecodes[req_location['code']]
except Exception:
pass
try:
location = self.lookup_samecode(req_location['local'], req_location['state'])
except Exception:
pass
return location
def lookup_county_state(self, samecode):
"""Given a samecode, return county, state"""
location = self.samecodes[samecode]
return location['local'], location['state']
def getstate(self, geosame):
"""Given a SAME code, return the state that SAME code is in"""
state = self.samecodes[geosame]['state']
return state
def getfeedscope(self, geocodes):
"""Given multiple SAME codes, determine if they are all in one state. If so, it returns that state.
Otherwise return 'US'. This is used to determine which NWS feed needs to be parsed to get
all alerts for the requested SAME codes"""
states = self._get_states_from_samecodes(geocodes)
if len(states) >= 2:
return 'US'
else:
return states[0]
def _get_states_from_samecodes(self, geocodes):
"""Returns all states for a given list of SAME codes
*Shouldn't be used to determine feed scope, please use getfeedscope()*
"""
states = []
for code in geocodes:
if not isinstance(geocodes, list):
raise Exception("specified geocodes must be list")
try:
state = self.samecodes[code]['state']
except KeyError:
raise Exception("Samecode Not Found")
else:
if state not in states:
states.append(state)
return states
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | GeoDB.getfeedscope | python | def getfeedscope(self, geocodes):
states = self._get_states_from_samecodes(geocodes)
if len(states) >= 2:
return 'US'
else:
return states[0] | Given multiple SAME codes, determine if they are all in one state. If so, it returns that state.
Otherwise return 'US'. This is used to determine which NWS feed needs to be parsed to get
all alerts for the requested SAME codes | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L55-L63 | [
"def _get_states_from_samecodes(self, geocodes):\n \"\"\"Returns all states for a given list of SAME codes\n\n *Shouldn't be used to determine feed scope, please use getfeedscope()*\n \"\"\"\n states = []\n for code in geocodes:\n if not isinstance(geocodes, list):\n raise Exception(\"specified geocodes must be list\")\n try:\n state = self.samecodes[code]['state']\n except KeyError:\n raise Exception(\"Samecode Not Found\")\n else:\n if state not in states:\n states.append(state)\n return states\n"
] | class GeoDB(object):
"""
Interact with samecodes data
will be adding additional data (zip code lookup) in the future.
"""
def __init__(self):
self.__same = SameCodes()
self.samecodes = self.__same.samecodes
def location_lookup(self, req_location):
"""
returns full location given samecode or county and state. Returns False if not valid.
*currently locations are a dictionary, once other geo data is added, they will move to a location class/obj*
"""
location = False
try:
location = self.samecodes[req_location['code']]
except Exception:
pass
try:
location = self.lookup_samecode(req_location['local'], req_location['state'])
except Exception:
pass
return location
def lookup_samecode(self, local, state):
"""Given County, State return the SAME code for specified location. Return False if not found"""
for location in self.samecodes:
if state.lower() == self.samecodes[location]['state'].lower():
if local.lower() == self.samecodes[location]['local'].lower():
return self.samecodes[location]
return False
def lookup_county_state(self, samecode):
"""Given a samecode, return county, state"""
location = self.samecodes[samecode]
return location['local'], location['state']
def getstate(self, geosame):
"""Given a SAME code, return the state that SAME code is in"""
state = self.samecodes[geosame]['state']
return state
def _get_states_from_samecodes(self, geocodes):
"""Returns all states for a given list of SAME codes
*Shouldn't be used to determine feed scope, please use getfeedscope()*
"""
states = []
for code in geocodes:
if not isinstance(geocodes, list):
raise Exception("specified geocodes must be list")
try:
state = self.samecodes[code]['state']
except KeyError:
raise Exception("Samecode Not Found")
else:
if state not in states:
states.append(state)
return states
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | GeoDB._get_states_from_samecodes | python | def _get_states_from_samecodes(self, geocodes):
states = []
for code in geocodes:
if not isinstance(geocodes, list):
raise Exception("specified geocodes must be list")
try:
state = self.samecodes[code]['state']
except KeyError:
raise Exception("Samecode Not Found")
else:
if state not in states:
states.append(state)
return states | Returns all states for a given list of SAME codes
*Shouldn't be used to determine feed scope, please use getfeedscope()* | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L65-L81 | null | class GeoDB(object):
"""
Interact with samecodes data
will be adding additional data (zip code lookup) in the future.
"""
def __init__(self):
self.__same = SameCodes()
self.samecodes = self.__same.samecodes
def location_lookup(self, req_location):
"""
returns full location given samecode or county and state. Returns False if not valid.
*currently locations are a dictionary, once other geo data is added, they will move to a location class/obj*
"""
location = False
try:
location = self.samecodes[req_location['code']]
except Exception:
pass
try:
location = self.lookup_samecode(req_location['local'], req_location['state'])
except Exception:
pass
return location
def lookup_samecode(self, local, state):
"""Given County, State return the SAME code for specified location. Return False if not found"""
for location in self.samecodes:
if state.lower() == self.samecodes[location]['state'].lower():
if local.lower() == self.samecodes[location]['local'].lower():
return self.samecodes[location]
return False
def lookup_county_state(self, samecode):
"""Given a samecode, return county, state"""
location = self.samecodes[samecode]
return location['local'], location['state']
def getstate(self, geosame):
"""Given a SAME code, return the state that SAME code is in"""
state = self.samecodes[geosame]['state']
return state
def getfeedscope(self, geocodes):
"""Given multiple SAME codes, determine if they are all in one state. If so, it returns that state.
Otherwise return 'US'. This is used to determine which NWS feed needs to be parsed to get
all alerts for the requested SAME codes"""
states = self._get_states_from_samecodes(geocodes)
if len(states) >= 2:
return 'US'
else:
return states[0]
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | SameCodes._load_same_codes | python | def _load_same_codes(self, refresh=False):
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes() | Loads the Same Codes into this object | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L106-L111 | [
"def _get_same_codes(self):\n \"\"\"get SAME codes, load into a dict and cache\"\"\"\n same = {}\n url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''\n # pylint: disable=E1103\n raw = requests.get(url).content.decode('utf-8') # py3 compatibility\n for row in raw.split('\\n'):\n try:\n code, local, state = str(row).strip().split(',')\n location = {'code': code, 'local': local, 'state': state.strip()}\n # when I contacted the nws to add a missing same code\n # they added a space before the state in the samecodes file\n # stripping it out\n same[code] = location\n finally:\n pass\n cache = open(self._same_cache_file, 'wb')\n cPickle.dump(same, cache)\n cache.close()\n return same\n",
"def _cached_same_codes(self):\n \"\"\"If a cached copy is available, return it\"\"\"\n cache_file = self._same_cache_file\n if os.path.exists(cache_file):\n maxage = datetime.now() - timedelta(minutes=4320)\n file_ts = datetime.fromtimestamp(os.stat(cache_file).st_mtime)\n if file_ts > maxage:\n try:\n cache = open(cache_file, 'rb')\n self._samecodes = cPickle.load(cache)\n cache.close()\n return True\n finally:\n pass\n self.reload()\n"
] | class SameCodes(object):
"""
Is used to download, parse and cache the SAME codes data from the web.
*All interaction with the SAME codes data should be done with the GeoGB object*
"""
def __init__(self):
self._cachedir = str(tempfile.gettempdir()) + '/'
self._same_cache_file = self._cachedir + 'nws_samecodes_{0}.cache'.format(sys.version_info[0])
self._samecodes = None
self._load_same_codes()
@property
def samecodes(self):
"""public method to return the same codes list"""
return self._samecodes
def reload(self):
"""force refresh of Same Codes"""
self._load_same_codes(refresh=True)
def _get_same_codes(self):
"""get SAME codes, load into a dict and cache"""
same = {}
url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
code, local, state = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location
finally:
pass
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same
def _cached_same_codes(self):
"""If a cached copy is available, return it"""
cache_file = self._same_cache_file
if os.path.exists(cache_file):
maxage = datetime.now() - timedelta(minutes=4320)
file_ts = datetime.fromtimestamp(os.stat(cache_file).st_mtime)
if file_ts > maxage:
try:
cache = open(cache_file, 'rb')
self._samecodes = cPickle.load(cache)
cache.close()
return True
finally:
pass
self.reload()
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | SameCodes._get_same_codes | python | def _get_same_codes(self):
same = {}
url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
code, local, state = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location
finally:
pass
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same | get SAME codes, load into a dict and cache | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L113-L132 | null | class SameCodes(object):
"""
Is used to download, parse and cache the SAME codes data from the web.
*All interaction with the SAME codes data should be done with the GeoGB object*
"""
def __init__(self):
self._cachedir = str(tempfile.gettempdir()) + '/'
self._same_cache_file = self._cachedir + 'nws_samecodes_{0}.cache'.format(sys.version_info[0])
self._samecodes = None
self._load_same_codes()
@property
def samecodes(self):
"""public method to return the same codes list"""
return self._samecodes
def reload(self):
"""force refresh of Same Codes"""
self._load_same_codes(refresh=True)
def _load_same_codes(self, refresh=False):
"""Loads the Same Codes into this object"""
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes()
def _cached_same_codes(self):
"""If a cached copy is available, return it"""
cache_file = self._same_cache_file
if os.path.exists(cache_file):
maxage = datetime.now() - timedelta(minutes=4320)
file_ts = datetime.fromtimestamp(os.stat(cache_file).st_mtime)
if file_ts > maxage:
try:
cache = open(cache_file, 'rb')
self._samecodes = cPickle.load(cache)
cache.close()
return True
finally:
pass
self.reload()
|
zebpalmer/WeatherAlerts | weatheralerts/geo.py | SameCodes._cached_same_codes | python | def _cached_same_codes(self):
cache_file = self._same_cache_file
if os.path.exists(cache_file):
maxage = datetime.now() - timedelta(minutes=4320)
file_ts = datetime.fromtimestamp(os.stat(cache_file).st_mtime)
if file_ts > maxage:
try:
cache = open(cache_file, 'rb')
self._samecodes = cPickle.load(cache)
cache.close()
return True
finally:
pass
self.reload() | If a cached copy is available, return it | train | https://github.com/zebpalmer/WeatherAlerts/blob/b99513571571fa0d65b90be883bb3bc000994027/weatheralerts/geo.py#L134-L148 | [
"def reload(self):\n \"\"\"force refresh of Same Codes\"\"\"\n self._load_same_codes(refresh=True)\n"
] | class SameCodes(object):
"""
Is used to download, parse and cache the SAME codes data from the web.
*All interaction with the SAME codes data should be done with the GeoGB object*
"""
def __init__(self):
self._cachedir = str(tempfile.gettempdir()) + '/'
self._same_cache_file = self._cachedir + 'nws_samecodes_{0}.cache'.format(sys.version_info[0])
self._samecodes = None
self._load_same_codes()
@property
def samecodes(self):
"""public method to return the same codes list"""
return self._samecodes
def reload(self):
"""force refresh of Same Codes"""
self._load_same_codes(refresh=True)
def _load_same_codes(self, refresh=False):
"""Loads the Same Codes into this object"""
if refresh is True:
self._get_same_codes()
else:
self._cached_same_codes()
def _get_same_codes(self):
"""get SAME codes, load into a dict and cache"""
same = {}
url = '''http://www.nws.noaa.gov/nwr/data/SameCode.txt'''
# pylint: disable=E1103
raw = requests.get(url).content.decode('utf-8') # py3 compatibility
for row in raw.split('\n'):
try:
code, local, state = str(row).strip().split(',')
location = {'code': code, 'local': local, 'state': state.strip()}
# when I contacted the nws to add a missing same code
# they added a space before the state in the samecodes file
# stripping it out
same[code] = location
finally:
pass
cache = open(self._same_cache_file, 'wb')
cPickle.dump(same, cache)
cache.close()
return same
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.type | python | def type(self):
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value | Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default). | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L56-L98 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.properties | python | def properties(self):
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value | Schema for particular properties of the object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L101-L107 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.items | python | def items(self):
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value | Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L110-L123 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.optional | python | def optional(self):
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value | Flag indicating an optional property. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L126-L132 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.additionalProperties | python | def additionalProperties(self):
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value | Schema for all additional properties, or False. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L135-L142 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.requires | python | def requires(self):
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value | Additional object or objects required by this object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L145-L153 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.maximum | python | def maximum(self):
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value | Maximum value of the object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L168-L177 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.minimumCanEqual | python | def minimumCanEqual(self):
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value | Flag indicating if maximum value is inclusive or exclusive. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L180-L189 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.maximumCanEqual | python | def maximumCanEqual(self):
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value | Flag indicating if the minimum value is inclusive or exclusive. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L192-L201 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.pattern | python | def pattern(self):
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex))) | Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L236-L257 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.maxLength | python | def maxLength(self):
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value | Maximum length of object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L272-L280 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.enum | python | def enum(self):
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value | Enumeration of allowed object values.
The enumeration must not contain duplicates. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L283-L307 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.title | python | def title(self):
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value | Title of the object.
This schema element is purely informative. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L310-L322 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.format | python | def format(self):
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value)) | Format of the (string) object. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L340-L354 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.divisibleBy | python | def divisibleBy(self):
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value | Integer that divides the object without reminder. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L374-L387 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
@property
def disallow(self):
"""
Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``.
"""
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/schema.py | Schema.disallow | python | def disallow(self):
value = self._schema.get("disallow", None)
if value is None:
return
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"disallow value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
disallow_list = value
else:
disallow_list = [value]
seen = set()
for js_disallow in disallow_list:
if isinstance(js_disallow, dict):
# no nested validation here
pass
else:
if js_disallow in seen:
raise SchemaError(
"disallow value {0!r} contains duplicate element"
" {1!r}".format(value, js_disallow))
else:
seen.add(js_disallow)
if js_disallow not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"disallow value {0!r} is not a simple type"
" name".format(js_disallow))
return disallow_list | Description of disallowed objects.
Disallow must be a type name, a nested schema or a list of those. Type
name must be one of ``string``, ``number``, ``integer``, ``boolean``,
``object``, ``array``, ``null`` or ``any``. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L390-L427 | null | class Schema(object):
"""
JSON schema object.
Schema describes aspects of a valid object. Upon validation each object has
an associated schema. Various properties of the object are tested against
rules described by the schema.
"""
def __init__(self, json_obj):
"""
Initialize a schema with a schema representation.
:param json_obj:
A JSON object (python dictionary) describing the schema.
"""
if not isinstance(json_obj, dict):
raise SchemaError("Schema definition must be a JSON object")
self._schema = json_obj
def __repr__(self):
return "Schema({0!r})".format(self._schema)
@property
def type(self):
"""
Type of a valid object.
Type may be a JSON type name or a list of such names. Valid JSON type
names are ``string``, ``number``, ``integer``, ``boolean``, ``object``,
``array``, ``any`` (default).
"""
value = self._schema.get("type", "any")
if not isinstance(value, (basestring, dict, list)):
raise SchemaError(
"type value {0!r} is not a simple type name, nested "
"schema nor a list of those".format(value))
if isinstance(value, list):
type_list = value
# Union types have to have at least two alternatives
if len(type_list) < 2:
raise SchemaError(
"union type {0!r} is too short".format(value))
else:
type_list = [value]
seen = set()
for js_type in type_list:
if isinstance(js_type, dict):
# no nested validation here
pass
elif isinstance(js_type, list):
# no nested validation here
pass
else:
if js_type in seen:
raise SchemaError(
("type value {0!r} contains duplicate element"
" {1!r}").format(value, js_type))
else:
seen.add(js_type)
if js_type not in (
"string", "number", "integer", "boolean", "object",
"array", "null", "any"):
raise SchemaError(
"type value {0!r} is not a simple type "
"name".format(js_type))
return value
@property
def properties(self):
"""Schema for particular properties of the object."""
value = self._schema.get("properties", {})
if not isinstance(value, dict):
raise SchemaError(
"properties value {0!r} is not an object".format(value))
return value
@property
def items(self):
"""
Schema or a list of schemas describing particular elements of the object.
A single schema applies to all the elements. Each element of the object
must match that schema. A list of schemas describes particular elements
of the object.
"""
value = self._schema.get("items", {})
if not isinstance(value, (list, dict)):
raise SchemaError(
"items value {0!r} is neither a list nor an object".
format(value))
return value
@property
def optional(self):
"""Flag indicating an optional property."""
value = self._schema.get("optional", False)
if value is not False and value is not True:
raise SchemaError(
"optional value {0!r} is not a boolean".format(value))
return value
@property
def additionalProperties(self):
"""Schema for all additional properties, or False."""
value = self._schema.get("additionalProperties", {})
if not isinstance(value, dict) and value is not False:
raise SchemaError(
"additionalProperties value {0!r} is neither false nor"
" an object".format(value))
return value
@property
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value
@property
def minimum(self):
"""Minimum value of the object."""
value = self._schema.get("minimum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"minimum value {0!r} is not a numeric type".format(
value))
return value
@property
def maximum(self):
"""Maximum value of the object."""
value = self._schema.get("maximum", None)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"maximum value {0!r} is not a numeric type".format(
value))
return value
@property
def minimumCanEqual(self):
"""Flag indicating if maximum value is inclusive or exclusive."""
if self.minimum is None:
raise SchemaError("minimumCanEqual requires presence of minimum")
value = self._schema.get("minimumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"minimumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def maximumCanEqual(self):
"""Flag indicating if the minimum value is inclusive or exclusive."""
if self.maximum is None:
raise SchemaError("maximumCanEqual requires presence of maximum")
value = self._schema.get("maximumCanEqual", True)
if value is not True and value is not False:
raise SchemaError(
"maximumCanEqual value {0!r} is not a boolean".format(
value))
return value
@property
def minItems(self):
"""Minimum number of items in the collection."""
value = self._schema.get("minItems", 0)
if not isinstance(value, int):
raise SchemaError(
"minItems value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minItems value {0!r} cannot be negative".format(value))
return value
@property
def maxItems(self):
"""Maximum number of items in the collection."""
value = self._schema.get("maxItems", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxItems value {0!r} is not an integer".format(value))
return value
@property
def uniqueItems(self):
"""Flag indicating that valid is a collection without duplicates."""
value = self._schema.get("uniqueItems", False)
if value is not True and value is not False:
raise SchemaError(
"uniqueItems value {0!r} is not a boolean".format(value))
return value
@property
def pattern(self):
"""
Regular expression describing valid objects.
.. note::
JSON schema specifications says that this value SHOULD
follow the ``EMCA 262/Perl 5`` format. We cannot support
this so we support python regular expressions instead. This
is still valid but should be noted for clarity.
:returns:
None or compiled regular expression
"""
value = self._schema.get("pattern", None)
if value is None:
return
try:
return re.compile(value)
except re.error as ex:
raise SchemaError(
"pattern value {0!r} is not a valid regular expression:"
" {1}".format(value, str(ex)))
@property
def minLength(self):
"""Minimum length of object."""
value = self._schema.get("minLength", 0)
if not isinstance(value, int):
raise SchemaError(
"minLength value {0!r} is not an integer".format(value))
if value < 0:
raise SchemaError(
"minLength value {0!r} cannot be negative".format(value))
return value
@property
def maxLength(self):
"""Maximum length of object."""
value = self._schema.get("maxLength", None)
if value is None:
return
if not isinstance(value, int):
raise SchemaError(
"maxLength value {0!r} is not an integer".format(value))
return value
@property
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
@property
def title(self):
"""
Title of the object.
This schema element is purely informative.
"""
value = self._schema.get("title", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"title value {0!r} is not a string".format(value))
return value
@property
def description(self):
"""
Description of the object.
This schema element is purely informative.
"""
value = self._schema.get("description", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"description value {0!r} is not a string".format(value))
return value
@property
def format(self):
"""Format of the (string) object."""
value = self._schema.get("format", None)
if value is None:
return
if not isinstance(value, basestring):
raise SchemaError(
"format value {0!r} is not a string".format(value))
if value in [
'date-time',
'regex',
]:
return value
raise NotImplementedError(
"format value {0!r} is not supported".format(value))
@property
def contentEncoding(self):
value = self._schema.get("contentEncoding", None)
if value is None:
return
if value.lower() not in [
"7bit", "8bit", "binary", "quoted-printable", "base64",
"ietf-token", "x-token"]:
raise SchemaError(
"contentEncoding value {0!r} is not"
" valid".format(value))
if value.lower() != "base64":
raise NotImplementedError(
"contentEncoding value {0!r} is not supported".format(
value))
return value
@property
def divisibleBy(self):
"""Integer that divides the object without reminder."""
value = self._schema.get("divisibleBy", 1)
if value is None:
return
if not isinstance(value, NUMERIC_TYPES):
raise SchemaError(
"divisibleBy value {0!r} is not a numeric type".
format(value))
if value < 0:
raise SchemaError(
"divisibleBy value {0!r} cannot be"
" negative".format(value))
return value
@property
@property
def extends(self):
raise NotImplementedError("extends property is not supported")
@property
def default(self):
"""Default value for an object."""
try:
return self._schema["default"]
except KeyError:
raise SchemaError("There is no schema default for this item")
|
zyga/json-schema-validator | json_schema_validator/shortcuts.py | validate | python | def validate(schema_text, data_text, deserializer=_default_deserializer):
schema = Schema(deserializer(schema_text))
data = deserializer(data_text)
return Validator.validate(schema, data) | Validate specified JSON text with specified schema.
Both arguments are converted to JSON objects with :func:`simplejson.loads`,
if present, or :func:`json.loads`.
:param schema_text:
Text of the JSON schema to check against
:type schema_text:
:class:`str`
:param data_text:
Text of the JSON object to check
:type data_text:
:class:`str`
:param deserializer:
Function to convert the schema and data to JSON objects
:type deserializer:
:class:`callable`
:returns:
Same as :meth:`json_schema_validator.validator.Validator.validate`
:raises:
Whatever may be raised by simplejson (in particular
:class:`simplejson.decoder.JSONDecoderError`, a subclass of
:class:`ValueError`) or json
:raises:
Whatever may be raised by
:meth:`json_schema_validator.validator.Validator.validate`. In particular
:class:`json_schema_validator.errors.ValidationError` and
:class:`json_schema_validator.errors.SchemaError` | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/shortcuts.py#L33-L66 | [
"def validate(cls, schema, obj):\n \"\"\"\n Validate specified JSON object obj with specified schema.\n\n :param schema:\n Schema to validate against\n :type schema:\n :class:`json_schema_validator.schema.Schema`\n :param obj:\n JSON object to validate\n :rtype:\n bool\n :returns:\n True on success\n :raises `json_schema_validator.errors.ValidationError`:\n if the object does not match schema.\n :raises `json_schema_validator.errors.SchemaError`:\n if the schema itself is wrong.\n \"\"\"\n if not isinstance(schema, Schema):\n raise ValueError(\n \"schema value {0!r} is not a Schema\"\n \" object\".format(schema))\n self = cls()\n self.validate_toplevel(schema, obj)\n return True\n"
] | # Copyright (C) 2010, 2011 Linaro Limited
# Copyright (C) 2016 Zygmunt Krynicki
#
# Author: Zygmunt Krynicki <me@zygoon.pl>
#
# This file is part of json-schema-validator.
#
# json-schema-validator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# json-schema-validator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with json-schema-validator. If not, see <http://www.gnu.org/licenses/>.
"""One liners that make the code shorter."""
try:
import simplejson as json
except ImportError:
import json
from json_schema_validator.schema import Schema
from json_schema_validator.validator import Validator
_default_deserializer = json.loads
|
zyga/json-schema-validator | json_schema_validator/extensions.py | timedelta_extension.to_json | python | def to_json(cls, obj):
return "{0}d {1}s {2}us".format(
obj.days, obj.seconds, obj.microseconds) | Serialize wrapped datetime.timedelta instance to a string the
with the following format:
[DAYS]d [SECONDS]s [MICROSECONDS]us | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/extensions.py#L80-L87 | null | class timedelta_extension(object):
"""
Proxy for serializing datetime.timedelta instances
"""
PATTERN = re.compile("^(\d+)d (\d+)s (\d+)us$")
@classmethod
@classmethod
def from_json(cls, doc):
"""
Deserialize JSON document (string) to datetime.timedelta instance
"""
if not isinstance(doc, basestring):
raise TypeError("JSON document must be a string")
match = cls.PATTERN.match(doc)
if not match:
raise ValueError("JSON document must match expected pattern")
days, seconds, microseconds = map(int, match.groups())
return timedelta(days, seconds, microseconds)
|
zyga/json-schema-validator | json_schema_validator/extensions.py | timedelta_extension.from_json | python | def from_json(cls, doc):
if not isinstance(doc, basestring):
raise TypeError("JSON document must be a string")
match = cls.PATTERN.match(doc)
if not match:
raise ValueError("JSON document must match expected pattern")
days, seconds, microseconds = map(int, match.groups())
return timedelta(days, seconds, microseconds) | Deserialize JSON document (string) to datetime.timedelta instance | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/extensions.py#L90-L100 | null | class timedelta_extension(object):
"""
Proxy for serializing datetime.timedelta instances
"""
PATTERN = re.compile("^(\d+)d (\d+)s (\d+)us$")
@classmethod
def to_json(cls, obj):
"""
Serialize wrapped datetime.timedelta instance to a string the
with the following format:
[DAYS]d [SECONDS]s [MICROSECONDS]us
"""
return "{0}d {1}s {2}us".format(
obj.days, obj.seconds, obj.microseconds)
@classmethod
|
zyga/json-schema-validator | json_schema_validator/validator.py | Validator.validate | python | def validate(cls, schema, obj):
if not isinstance(schema, Schema):
raise ValueError(
"schema value {0!r} is not a Schema"
" object".format(schema))
self = cls()
self.validate_toplevel(schema, obj)
return True | Validate specified JSON object obj with specified schema.
:param schema:
Schema to validate against
:type schema:
:class:`json_schema_validator.schema.Schema`
:param obj:
JSON object to validate
:rtype:
bool
:returns:
True on success
:raises `json_schema_validator.errors.ValidationError`:
if the object does not match schema.
:raises `json_schema_validator.errors.SchemaError`:
if the schema itself is wrong. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/validator.py#L81-L106 | [
"def validate_toplevel(self, schema, obj):\n self._object_stack = []\n self._schema_stack = []\n self._push_schema(schema, \"schema\")\n self._push_object(obj, \"object\")\n self._validate()\n self._pop_schema()\n self._pop_object()\n"
] | class Validator(object):
"""
JSON Schema validator.
Can be used to validate any JSON document against a
:class:`json_schema_validator.schema.Schema`.
"""
JSON_TYPE_MAP = {
"string": basestring,
"number": NUMERIC_TYPES,
"integer": int,
"object": dict,
"array": list,
"null": None.__class__,
}
def __init__(self):
self._schema_stack = []
self._object_stack = []
def _push_object(self, obj, path):
self._object_stack.append((obj, path))
def _pop_object(self):
self._object_stack.pop()
def _push_schema(self, schema, path):
self._schema_stack.append((schema, path))
def _pop_schema(self):
self._schema_stack.pop()
@property
def _object(self):
return self._object_stack[-1][0]
@property
def _schema(self):
return self._schema_stack[-1][0]
@classmethod
def _get_object_expression(self):
return "".join(map(lambda x: x[1], self._object_stack))
def _get_schema_expression(self):
return "".join(map(lambda x: x[1], self._schema_stack))
def validate_toplevel(self, schema, obj):
self._object_stack = []
self._schema_stack = []
self._push_schema(schema, "schema")
self._push_object(obj, "object")
self._validate()
self._pop_schema()
self._pop_object()
def _validate(self):
obj = self._object
self._validate_type()
self._validate_requires()
if isinstance(obj, dict):
self._validate_properties()
self._validate_additional_properties()
elif isinstance(obj, list):
self._validate_items()
else:
self._validate_enum()
self._validate_format()
self._validate_pattern()
if isinstance(obj, basestring):
self._validate_length()
elif isinstance(obj, NUMERIC_TYPES):
self._validate_range()
self._report_unsupported()
def _report_error(self, legacy_message, new_message=None,
schema_suffix=None):
"""
Report an error during validation.
There are two error messages. The legacy message is used for backwards
compatibility and usually contains the object (possibly very large)
that failed to validate. The new message is much better as it contains
just a short message on what went wrong. User code can inspect
object_expr and schema_expr to see which part of the object failed to
validate against which part of the schema.
The schema_suffix, if provided, is appended to the schema_expr. This
is quite handy to specify the bit that the validator looked at (such as
the type or optional flag, etc). object_suffix serves the same purpose
but is used for object expressions instead.
"""
object_expr = self._get_object_expression()
schema_expr = self._get_schema_expression()
if schema_suffix:
schema_expr += schema_suffix
raise ValidationError(legacy_message, new_message, object_expr,
schema_expr)
def _push_property_schema(self, prop):
"""Construct a sub-schema from a property of the current schema."""
schema = Schema(self._schema.properties[prop])
self._push_schema(schema, ".properties." + prop)
def _push_additional_property_schema(self):
schema = Schema(self._schema.additionalProperties)
self._push_schema(schema, ".additionalProperties")
def _push_array_schema(self):
schema = Schema(self._schema.items)
self._push_schema(schema, ".items")
def _push_array_item_object(self, index):
self._push_object(self._object[index], "[%d]" % index)
def _push_property_object(self, prop):
self._push_object(self._object[prop], "." + prop)
def _report_unsupported(self):
schema = self._schema
if schema.contentEncoding is not None:
raise NotImplementedError("contentEncoding is not supported")
if schema.divisibleBy != 1:
raise NotImplementedError("divisibleBy is not supported")
if schema.disallow is not None:
raise NotImplementedError("disallow is not supported")
def _validate_type(self):
schema = self._schema
json_type = schema.type
if json_type == "any":
return
obj = self._object
if json_type == "boolean":
# Bool is special cased because in python there is no
# way to test for isinstance(something, bool) that would
# not catch isinstance(1, bool) :/
if obj is not True and obj is not False:
self._report_error(
"{obj!r} does not match type {type!r}".format(
obj=obj, type=json_type),
"Object has incorrect type (expected boolean)",
schema_suffix=".type")
elif isinstance(json_type, dict):
# Nested type check. This is pretty odd case. Here we
# don't change our object stack (it's the same object).
self._push_schema(Schema(json_type), ".type")
self._validate()
self._pop_schema()
elif isinstance(json_type, list):
# Alternative type check, here we may match _any_ of the types
# in the list to be considered valid.
json_type_list = json_type
if json_type == []:
return
for index, json_type in enumerate(json_type_list):
# Aww, ugly. The level of packaging around Schema is annoying
self._push_schema(
Schema({'type': json_type}),
".type.%d" % index)
try:
self._validate()
except ValidationError:
# Ignore errors, we just want one thing to match
pass
else:
# We've got a match - break the loop
break
finally:
# Pop the schema regardless of match/mismatch
self._pop_schema()
else:
# We were not interupted (no break) so we did not match
self._report_error(
"{obj!r} does not match any of the types in {type!r}".format(
obj=obj, type=json_type_list),
"Object has incorrect type (multiple types possible)",
schema_suffix=".type")
else:
# Simple type check
if not isinstance(obj, self.JSON_TYPE_MAP[json_type]):
self._report_error(
"{obj!r} does not match type {type!r}".format(
obj=obj, type=json_type),
"Object has incorrect type (expected {type})".format(
type=json_type),
schema_suffix=".type")
def _validate_pattern(self):
ptn = self._schema.pattern
obj = self._object
if ptn is None:
return
if not isinstance(obj, basestring):
return
if re.match(ptn, obj):
return
self._report_error(
"{obj!r} does not match pattern {ptn!r}".format(
obj=obj,ptn=ptn),
"Object does not match pattern (expected {ptn})".format(
ptn=ptn),
schema_suffix=".pattern"
)
def _validate_format(self):
fmt = self._schema.format
obj = self._object
if fmt is None:
return
if fmt == 'date-time':
try:
DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
datetime.datetime.strptime(obj, DATE_TIME_FORMAT)
except ValueError:
self._report_error(
"{obj!r} is not a string representing JSON date-time".format(
obj=obj),
"Object is not a string representing JSON date-time",
schema_suffix=".format")
elif fmt == 'regex':
try:
re.compile(obj)
except:
self._report_error(
"{obj!r} is not a string representing a regex".format(
obj=obj),
"Object is not a string representing a regex",
schema_suffix=".format")
else:
raise NotImplementedError("format {0!r} is not supported".format(fmt))
def _validate_properties(self):
obj = self._object
schema = self._schema
assert isinstance(obj, dict)
for prop in schema.properties.keys():
self._push_property_schema(prop)
if prop in obj:
self._push_property_object(prop)
self._validate()
self._pop_object()
else:
if not self._schema.optional:
self._report_error(
"{obj!r} does not have property {prop!r}".format(
obj=obj, prop=prop),
"Object lacks property {prop!r}".format(
prop=prop),
schema_suffix=".optional")
self._pop_schema()
def _validate_additional_properties(self):
obj = self._object
assert isinstance(obj, dict)
if self._schema.additionalProperties is False:
# Additional properties are disallowed
# Report exception for each unknown property
for prop in obj.keys():
if prop not in self._schema.properties:
self._report_error(
"{obj!r} has unknown property {prop!r} and"
" additionalProperties is false".format(
obj=obj, prop=prop),
"Object has unknown property {prop!r} but"
" additional properties are disallowed".format(
prop=prop),
schema_suffix=".additionalProperties")
else:
# Check each property against this object
self._push_additional_property_schema()
for prop in obj.keys():
self._push_property_object(prop)
self._validate()
self._pop_object()
self._pop_schema()
def _validate_enum(self):
obj = self._object
schema = self._schema
if schema.enum is not None:
for allowed_value in schema.enum:
if obj == allowed_value:
break
else:
self._report_error(
"{obj!r} does not match any value in enumeration"
" {enum!r}".format(obj=obj, enum=schema.enum),
"Object does not match any value in enumeration",
schema_suffix=".enum")
def _validate_length(self):
obj = self._object
schema = self._schema
if schema.minLength is not None:
if len(obj) < schema.minLength:
self._report_error(
"{obj!r} does not meet the minimum length"
" {minLength!r}".format(obj=obj, minLength=schema.minLength),
"Object does not meet the minimum length",
schema_suffix=".minLength")
if schema.maxLength is not None:
if len(obj) > schema.maxLength:
self._report_error(
"{obj!r} exceeds the maximum length"
" {maxLength!r}".format(obj=obj, maxLength=schema.maxLength),
"Object exceeds the maximum length",
schema_suffix=".maxLength")
def _validate_range(self):
obj = self._object
schema = self._schema
if schema.minimum is not None:
if obj < schema.minimum or (obj == schema.minimum and not schema.minimumCanEqual):
self._report_error(
"{obj!r} is less than the minimum"
" {minimum!r}".format(obj=obj, minimum=schema.minimum),
"Object is less than the minimum",
schema_suffix=".minimum")
if schema.maximum is not None:
if obj > schema.maximum or (obj == schema.maximum and not schema.maximumCanEqual):
self._report_error(
"{obj!r} is greater than the maximum"
" {maximum!r}".format(obj=obj, maximum=schema.maximum),
"Object is greater than the maximum",
schema_suffix=".maximum")
def _validate_items(self):
obj = self._object
schema = self._schema
assert isinstance(obj, list)
items_schema_json = schema.items
if items_schema_json == {}:
# default value, don't do anything
return
if isinstance(obj, list) and schema.uniqueItems is True and len(set(obj)) != len(obj):
# If we want a list of unique items and the length of unique
# elements is different from the length of the full list
# then validation fails.
# This implementation isn't strictly compatible with the specs, because
# we are not checking unique dicts.
self._report_error(
"Repeated items found in {obj!r}".format(obj=obj),
"Repeated items found in array",
schema_suffix=".items")
if schema.minItems:
if len(obj) < schema.minItems:
self._report_error(
"{obj!r} has fewer than the minimum number of items"
" {minItems!r}".format(obj=obj, minimum=schema.minItems),
"Object has fewer than the minimum number of items",
schema_suffix=".minItems")
if schema.maxItems is not None:
if len(obj) > schema.maxItems:
self._report_error(
"{obj!r} has more than the maximum number of items"
" {maxItems!r}".format(obj=obj, minimum=schema.maxItems),
"Object has more than the maximum number of items",
schema_suffix=".maxItems")
if isinstance(items_schema_json, dict):
self._push_array_schema()
for index, item in enumerate(obj):
self._push_array_item_object(index)
self._validate()
self._pop_object()
self._pop_schema()
elif isinstance(items_schema_json, list):
if len(obj) < len(items_schema_json):
# If our data array is shorter than the schema then
# validation fails. Longer arrays are okay (during this
# step) as they are validated based on
# additionalProperties schema
self._report_error(
"{obj!r} is shorter than array schema {schema!r}".
format(obj=obj, schema=items_schema_json),
"Object array is shorter than schema array",
schema_suffix=".items")
if len(obj) != len(items_schema_json) and schema.additionalProperties is False:
# If our array is not exactly the same size as the
# schema and additional properties are disallowed then
# validation fails
self._report_error(
"{obj!r} is not of the same length as array schema"
" {schema!r} and additionalProperties is"
" false".format(obj=obj, schema=items_schema_json),
"Object array is not of the same length as schema array",
schema_suffix=".items")
# Validate each array element using schema for the
# corresponding array index, fill missing values (since
# there may be more items in our array than in the schema)
# with additionalProperties which by now is not False
for index, (item, item_schema_json) in enumerate(
zip_longest(
obj, items_schema_json,
fillvalue=schema.additionalProperties)):
item_schema = Schema(item_schema_json)
if index < len(items_schema_json):
self._push_schema(item_schema, "items[%d]" % index)
else:
self._push_schema(item_schema, ".additionalProperties")
self._push_array_item_object(index)
self._validate()
self._pop_schema()
self._pop_object()
def _validate_requires(self):
obj = self._object
schema = self._schema
requires_json = schema.requires
if requires_json == {}:
# default value, don't do anything
return
# Find our enclosing object in the object stack
if len(self._object_stack) < 2:
self._report_error(
"{obj!r} requires that enclosing object matches"
" schema {schema!r} but there is no enclosing"
" object".format(obj=obj, schema=requires_json),
"Object has no enclosing object that matches schema",
schema_suffix=".requires")
# Note: Parent object can be None, (e.g. a null property)
parent_obj = self._object_stack[-2][0]
if isinstance(requires_json, basestring):
# This is a simple property test
if (not isinstance(parent_obj, dict)
or requires_json not in parent_obj):
self._report_error(
"{obj!r} requires presence of property {requires!r}"
" in the same object".format(
obj=obj, requires=requires_json),
"Enclosing object does not have property"
" {prop!r}".format(prop=requires_json),
schema_suffix=".requires")
elif isinstance(requires_json, dict):
# Requires designates a whole schema, the enclosing object
# must match against that schema.
# Here we resort to a small hack. Proper implementation
# would require us to validate the parent object from its
# own context (own list of parent objects). Since doing that
# and restoring the state would be very complicated we just
# instantiate a new validator with a subset of our current
# history here.
sub_validator = Validator()
sub_validator._object_stack = self._object_stack[:-1]
sub_validator._schema_stack = self._schema_stack[:]
sub_validator._push_schema(
Schema(requires_json), ".requires")
sub_validator._validate()
|
zyga/json-schema-validator | json_schema_validator/validator.py | Validator._report_error | python | def _report_error(self, legacy_message, new_message=None,
schema_suffix=None):
object_expr = self._get_object_expression()
schema_expr = self._get_schema_expression()
if schema_suffix:
schema_expr += schema_suffix
raise ValidationError(legacy_message, new_message, object_expr,
schema_expr) | Report an error during validation.
There are two error messages. The legacy message is used for backwards
compatibility and usually contains the object (possibly very large)
that failed to validate. The new message is much better as it contains
just a short message on what went wrong. User code can inspect
object_expr and schema_expr to see which part of the object failed to
validate against which part of the schema.
The schema_suffix, if provided, is appended to the schema_expr. This
is quite handy to specify the bit that the validator looked at (such as
the type or optional flag, etc). object_suffix serves the same purpose
but is used for object expressions instead. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/validator.py#L142-L164 | null | class Validator(object):
"""
JSON Schema validator.
Can be used to validate any JSON document against a
:class:`json_schema_validator.schema.Schema`.
"""
JSON_TYPE_MAP = {
"string": basestring,
"number": NUMERIC_TYPES,
"integer": int,
"object": dict,
"array": list,
"null": None.__class__,
}
def __init__(self):
self._schema_stack = []
self._object_stack = []
def _push_object(self, obj, path):
self._object_stack.append((obj, path))
def _pop_object(self):
self._object_stack.pop()
def _push_schema(self, schema, path):
self._schema_stack.append((schema, path))
def _pop_schema(self):
self._schema_stack.pop()
@property
def _object(self):
return self._object_stack[-1][0]
@property
def _schema(self):
return self._schema_stack[-1][0]
@classmethod
def validate(cls, schema, obj):
"""
Validate specified JSON object obj with specified schema.
:param schema:
Schema to validate against
:type schema:
:class:`json_schema_validator.schema.Schema`
:param obj:
JSON object to validate
:rtype:
bool
:returns:
True on success
:raises `json_schema_validator.errors.ValidationError`:
if the object does not match schema.
:raises `json_schema_validator.errors.SchemaError`:
if the schema itself is wrong.
"""
if not isinstance(schema, Schema):
raise ValueError(
"schema value {0!r} is not a Schema"
" object".format(schema))
self = cls()
self.validate_toplevel(schema, obj)
return True
def _get_object_expression(self):
return "".join(map(lambda x: x[1], self._object_stack))
def _get_schema_expression(self):
return "".join(map(lambda x: x[1], self._schema_stack))
def validate_toplevel(self, schema, obj):
self._object_stack = []
self._schema_stack = []
self._push_schema(schema, "schema")
self._push_object(obj, "object")
self._validate()
self._pop_schema()
self._pop_object()
def _validate(self):
obj = self._object
self._validate_type()
self._validate_requires()
if isinstance(obj, dict):
self._validate_properties()
self._validate_additional_properties()
elif isinstance(obj, list):
self._validate_items()
else:
self._validate_enum()
self._validate_format()
self._validate_pattern()
if isinstance(obj, basestring):
self._validate_length()
elif isinstance(obj, NUMERIC_TYPES):
self._validate_range()
self._report_unsupported()
def _push_property_schema(self, prop):
"""Construct a sub-schema from a property of the current schema."""
schema = Schema(self._schema.properties[prop])
self._push_schema(schema, ".properties." + prop)
def _push_additional_property_schema(self):
schema = Schema(self._schema.additionalProperties)
self._push_schema(schema, ".additionalProperties")
def _push_array_schema(self):
schema = Schema(self._schema.items)
self._push_schema(schema, ".items")
def _push_array_item_object(self, index):
self._push_object(self._object[index], "[%d]" % index)
def _push_property_object(self, prop):
self._push_object(self._object[prop], "." + prop)
def _report_unsupported(self):
schema = self._schema
if schema.contentEncoding is not None:
raise NotImplementedError("contentEncoding is not supported")
if schema.divisibleBy != 1:
raise NotImplementedError("divisibleBy is not supported")
if schema.disallow is not None:
raise NotImplementedError("disallow is not supported")
def _validate_type(self):
    """Check the current object against the schema ``type`` facet.

    ``type`` may be "any" (no check), a simple type name, a nested schema
    dict, or a list of alternatives where any single match suffices.
    """
    schema = self._schema
    json_type = schema.type
    if json_type == "any":
        return
    obj = self._object
    if json_type == "boolean":
        # Bool is special cased because in python there is no
        # way to test for isinstance(something, bool) that would
        # not catch isinstance(1, bool) :/
        if obj is not True and obj is not False:
            self._report_error(
                "{obj!r} does not match type {type!r}".format(
                    obj=obj, type=json_type),
                "Object has incorrect type (expected boolean)",
                schema_suffix=".type")
    elif isinstance(json_type, dict):
        # Nested type check. This is pretty odd case. Here we
        # don't change our object stack (it's the same object).
        self._push_schema(Schema(json_type), ".type")
        self._validate()
        self._pop_schema()
    elif isinstance(json_type, list):
        # Alternative type check, here we may match _any_ of the types
        # in the list to be considered valid.
        json_type_list = json_type
        if json_type == []:
            # An empty alternatives list accepts everything.
            return
        for index, json_type in enumerate(json_type_list):
            # Aww, ugly. The level of packaging around Schema is annoying
            self._push_schema(
                Schema({'type': json_type}),
                ".type.%d" % index)
            try:
                self._validate()
            except ValidationError:
                # Ignore errors, we just want one thing to match
                pass
            else:
                # We've got a match - break the loop
                break
            finally:
                # Pop the schema regardless of match/mismatch
                self._pop_schema()
        else:
            # We were not interupted (no break) so we did not match
            self._report_error(
                "{obj!r} does not match any of the types in {type!r}".format(
                    obj=obj, type=json_type_list),
                "Object has incorrect type (multiple types possible)",
                schema_suffix=".type")
    else:
        # Simple type check via the class-level name -> python-type map.
        if not isinstance(obj, self.JSON_TYPE_MAP[json_type]):
            self._report_error(
                "{obj!r} does not match type {type!r}".format(
                    obj=obj, type=json_type),
                "Object has incorrect type (expected {type})".format(
                    type=json_type),
                schema_suffix=".type")
def _validate_pattern(self):
    """Check the current object against the schema ``pattern`` regex, if any."""
    pattern = self._schema.pattern
    value = self._object
    # Only string objects with a configured pattern are checked.
    if pattern is None or not isinstance(value, basestring):
        return
    if re.match(pattern, value) is None:
        self._report_error(
            "{obj!r} does not match pattern {ptn!r}".format(
                obj=value, ptn=pattern),
            "Object does not match pattern (expected {ptn})".format(
                ptn=pattern),
            schema_suffix=".pattern"
        )
def _validate_format(self):
    """Validate the current object against the schema ``format`` hint.

    Supported formats:
      * ``date-time`` -- value must parse as ``%Y-%m-%dT%H:%M:%SZ``
      * ``regex``     -- value must compile as a regular expression

    :raises NotImplementedError: for any other format name.
    """
    fmt = self._schema.format
    obj = self._object
    if fmt is None:
        return
    if fmt == 'date-time':
        try:
            DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
            datetime.datetime.strptime(obj, DATE_TIME_FORMAT)
        except ValueError:
            self._report_error(
                "{obj!r} is not a string representing JSON date-time".format(
                    obj=obj),
                "Object is not a string representing JSON date-time",
                schema_suffix=".format")
    elif fmt == 'regex':
        try:
            re.compile(obj)
        # BUG FIX: this was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  Catch only what re.compile
        # raises for a bad value: re.error for an invalid pattern,
        # TypeError for a non-string object.
        except (re.error, TypeError):
            self._report_error(
                "{obj!r} is not a string representing a regex".format(
                    obj=obj),
                "Object is not a string representing a regex",
                schema_suffix=".format")
    else:
        raise NotImplementedError("format {0!r} is not supported".format(fmt))
def _validate_properties(self):
    """Validate each schema-declared property of the current dict object."""
    obj = self._object
    schema = self._schema
    assert isinstance(obj, dict)
    for prop in schema.properties.keys():
        self._push_property_schema(prop)
        if prop in obj:
            # Property present: recurse into its value.
            self._push_property_object(prop)
            self._validate()
            self._pop_object()
        else:
            # Property missing: allowed only when the pushed sub-schema
            # (now self._schema) is marked optional.
            if not self._schema.optional:
                self._report_error(
                    "{obj!r} does not have property {prop!r}".format(
                        obj=obj, prop=prop),
                    "Object lacks property {prop!r}".format(
                        prop=prop),
                    schema_suffix=".optional")
        self._pop_schema()
def _validate_additional_properties(self):
    """Validate dict keys with respect to ``additionalProperties``."""
    obj = self._object
    assert isinstance(obj, dict)
    if self._schema.additionalProperties is False:
        # Additional properties are disallowed
        # Report exception for each unknown property
        for prop in obj.keys():
            if prop not in self._schema.properties:
                self._report_error(
                    "{obj!r} has unknown property {prop!r} and"
                    " additionalProperties is false".format(
                        obj=obj, prop=prop),
                    "Object has unknown property {prop!r} but"
                    " additional properties are disallowed".format(
                        prop=prop),
                    schema_suffix=".additionalProperties")
    else:
        # Check each property against this object
        # (note: *every* key is checked here, including declared ones).
        self._push_additional_property_schema()
        for prop in obj.keys():
            self._push_property_object(prop)
            self._validate()
            self._pop_object()
        self._pop_schema()
def _validate_enum(self):
    """Check the current object against the schema ``enum`` list, if any."""
    value = self._object
    schema = self._schema
    if schema.enum is None:
        return
    # Membership is defined by ``==`` against each allowed value.
    if not any(value == allowed_value for allowed_value in schema.enum):
        self._report_error(
            "{obj!r} does not match any value in enumeration"
            " {enum!r}".format(obj=value, enum=schema.enum),
            "Object does not match any value in enumeration",
            schema_suffix=".enum")
def _validate_length(self):
    """Enforce ``minLength``/``maxLength`` on the current (string) object."""
    value = self._object
    schema = self._schema
    length = len(value)
    if schema.minLength is not None and length < schema.minLength:
        self._report_error(
            "{obj!r} does not meet the minimum length"
            " {minLength!r}".format(obj=value, minLength=schema.minLength),
            "Object does not meet the minimum length",
            schema_suffix=".minLength")
    if schema.maxLength is not None and length > schema.maxLength:
        self._report_error(
            "{obj!r} exceeds the maximum length"
            " {maxLength!r}".format(obj=value, maxLength=schema.maxLength),
            "Object exceeds the maximum length",
            schema_suffix=".maxLength")
def _validate_range(self):
    """Enforce ``minimum``/``maximum`` (honouring the CanEqual flags)."""
    value = self._object
    schema = self._schema
    minimum = schema.minimum
    if minimum is not None:
        # Equality violates the bound only when minimumCanEqual is false.
        if value < minimum or (value == minimum and not schema.minimumCanEqual):
            self._report_error(
                "{obj!r} is less than the minimum"
                " {minimum!r}".format(obj=value, minimum=minimum),
                "Object is less than the minimum",
                schema_suffix=".minimum")
    maximum = schema.maximum
    if maximum is not None:
        if value > maximum or (value == maximum and not schema.maximumCanEqual):
            self._report_error(
                "{obj!r} is greater than the maximum"
                " {maximum!r}".format(obj=value, maximum=maximum),
                "Object is greater than the maximum",
                schema_suffix=".maximum")
def _validate_items(self):
    """Validate a JSON array against items/uniqueItems/minItems/maxItems.

    ``items`` may be a single schema (applied to every element) or a
    list of schemas (applied positionally, with ``additionalProperties``
    supplying the schema for trailing elements).
    """
    obj = self._object
    schema = self._schema
    assert isinstance(obj, list)
    items_schema_json = schema.items
    if items_schema_json == {}:
        # default value, don't do anything
        return
    if isinstance(obj, list) and schema.uniqueItems is True and len(set(obj)) != len(obj):
        # If we want a list of unique items and the length of unique
        # elements is different from the length of the full list
        # then validation fails.
        # This implementation isn't strictly compatible with the specs,
        # because we are not checking unique dicts.
        self._report_error(
            "Repeated items found in {obj!r}".format(obj=obj),
            "Repeated items found in array",
            schema_suffix=".items")
    if schema.minItems is not None:
        if len(obj) < schema.minItems:
            self._report_error(
                # BUG FIX: keyword was ``minimum=`` while the template
                # references {minItems!r} -- format() raised KeyError
                # instead of producing the ValidationError message.
                "{obj!r} has fewer than the minimum number of items"
                " {minItems!r}".format(obj=obj, minItems=schema.minItems),
                "Object has fewer than the minimum number of items",
                schema_suffix=".minItems")
    if schema.maxItems is not None:
        if len(obj) > schema.maxItems:
            self._report_error(
                # BUG FIX: same keyword/template mismatch as minItems above.
                "{obj!r} has more than the maximum number of items"
                " {maxItems!r}".format(obj=obj, maxItems=schema.maxItems),
                "Object has more than the maximum number of items",
                schema_suffix=".maxItems")
    if isinstance(items_schema_json, dict):
        # Single schema: every element must match it.
        self._push_array_schema()
        for index, item in enumerate(obj):
            self._push_array_item_object(index)
            self._validate()
            self._pop_object()
        self._pop_schema()
    elif isinstance(items_schema_json, list):
        if len(obj) < len(items_schema_json):
            # If our data array is shorter than the schema then
            # validation fails. Longer arrays are okay (during this
            # step) as they are validated based on
            # additionalProperties schema
            self._report_error(
                "{obj!r} is shorter than array schema {schema!r}".
                format(obj=obj, schema=items_schema_json),
                "Object array is shorter than schema array",
                schema_suffix=".items")
        if len(obj) != len(items_schema_json) and schema.additionalProperties is False:
            # If our array is not exactly the same size as the
            # schema and additional properties are disallowed then
            # validation fails
            self._report_error(
                "{obj!r} is not of the same length as array schema"
                " {schema!r} and additionalProperties is"
                " false".format(obj=obj, schema=items_schema_json),
                "Object array is not of the same length as schema array",
                schema_suffix=".items")
        # Validate each array element using schema for the
        # corresponding array index, fill missing values (since
        # there may be more items in our array than in the schema)
        # with additionalProperties which by now is not False
        for index, (item, item_schema_json) in enumerate(
                zip_longest(
                    obj, items_schema_json,
                    fillvalue=schema.additionalProperties)):
            item_schema = Schema(item_schema_json)
            if index < len(items_schema_json):
                # NOTE(review): this fragment lacks the leading "." used
                # by every other push -- looks like a cosmetic defect in
                # the reported schema expression; left unchanged to keep
                # error strings stable.
                self._push_schema(item_schema, "items[%d]" % index)
            else:
                self._push_schema(item_schema, ".additionalProperties")
            self._push_array_item_object(index)
            self._validate()
            self._pop_schema()
            self._pop_object()
def _validate_requires(self):
    """Check the schema ``requires`` facet against the *enclosing* object.

    ``requires`` is either a property name (the sibling property must be
    present) or a whole schema (the enclosing object must validate
    against it).
    """
    obj = self._object
    schema = self._schema
    requires_json = schema.requires
    if requires_json == {}:
        # default value, don't do anything
        return
    # Find our enclosing object in the object stack
    if len(self._object_stack) < 2:
        # _report_error raises ValidationError, so validation stops here.
        self._report_error(
            "{obj!r} requires that enclosing object matches"
            " schema {schema!r} but there is no enclosing"
            " object".format(obj=obj, schema=requires_json),
            "Object has no enclosing object that matches schema",
            schema_suffix=".requires")
    # Note: Parent object can be None, (e.g. a null property)
    parent_obj = self._object_stack[-2][0]
    if isinstance(requires_json, basestring):
        # This is a simple property test
        if (not isinstance(parent_obj, dict)
                or requires_json not in parent_obj):
            self._report_error(
                "{obj!r} requires presence of property {requires!r}"
                " in the same object".format(
                    obj=obj, requires=requires_json),
                "Enclosing object does not have property"
                " {prop!r}".format(prop=requires_json),
                schema_suffix=".requires")
    elif isinstance(requires_json, dict):
        # Requires designates a whole schema, the enclosing object
        # must match against that schema.
        # Here we resort to a small hack. Proper implementation
        # would require us to validate the parent object from its
        # own context (own list of parent objects). Since doing that
        # and restoring the state would be very complicated we just
        # instantiate a new validator with a subset of our current
        # history here.
        sub_validator = Validator()
        sub_validator._object_stack = self._object_stack[:-1]
        sub_validator._schema_stack = self._schema_stack[:]
        sub_validator._push_schema(
            Schema(requires_json), ".requires")
        sub_validator._validate()
|
zyga/json-schema-validator | json_schema_validator/validator.py | Validator._push_property_schema | python | def _push_property_schema(self, prop):
schema = Schema(self._schema.properties[prop])
self._push_schema(schema, ".properties." + prop) | Construct a sub-schema from a property of the current schema. | train | https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/validator.py#L166-L169 | null | class Validator(object):
"""
JSON Schema validator.
Can be used to validate any JSON document against a
:class:`json_schema_validator.schema.Schema`.
"""
JSON_TYPE_MAP = {
"string": basestring,
"number": NUMERIC_TYPES,
"integer": int,
"object": dict,
"array": list,
"null": None.__class__,
}
def __init__(self):
self._schema_stack = []
self._object_stack = []
def _push_object(self, obj, path):
self._object_stack.append((obj, path))
def _pop_object(self):
self._object_stack.pop()
def _push_schema(self, schema, path):
self._schema_stack.append((schema, path))
def _pop_schema(self):
self._schema_stack.pop()
@property
def _object(self):
return self._object_stack[-1][0]
@property
def _schema(self):
return self._schema_stack[-1][0]
@classmethod
def validate(cls, schema, obj):
"""
Validate specified JSON object obj with specified schema.
:param schema:
Schema to validate against
:type schema:
:class:`json_schema_validator.schema.Schema`
:param obj:
JSON object to validate
:rtype:
bool
:returns:
True on success
:raises `json_schema_validator.errors.ValidationError`:
if the object does not match schema.
:raises `json_schema_validator.errors.SchemaError`:
if the schema itself is wrong.
"""
if not isinstance(schema, Schema):
raise ValueError(
"schema value {0!r} is not a Schema"
" object".format(schema))
self = cls()
self.validate_toplevel(schema, obj)
return True
def _get_object_expression(self):
return "".join(map(lambda x: x[1], self._object_stack))
def _get_schema_expression(self):
return "".join(map(lambda x: x[1], self._schema_stack))
def validate_toplevel(self, schema, obj):
self._object_stack = []
self._schema_stack = []
self._push_schema(schema, "schema")
self._push_object(obj, "object")
self._validate()
self._pop_schema()
self._pop_object()
def _validate(self):
obj = self._object
self._validate_type()
self._validate_requires()
if isinstance(obj, dict):
self._validate_properties()
self._validate_additional_properties()
elif isinstance(obj, list):
self._validate_items()
else:
self._validate_enum()
self._validate_format()
self._validate_pattern()
if isinstance(obj, basestring):
self._validate_length()
elif isinstance(obj, NUMERIC_TYPES):
self._validate_range()
self._report_unsupported()
def _report_error(self, legacy_message, new_message=None,
                  schema_suffix=None):
    """Raise a ValidationError describing the current validation position.

    *legacy_message* is the backwards-compatible message (it usually embeds
    the offending object, possibly a large one); *new_message* is a short
    human-readable summary.  The object and schema JSON-path expressions
    are derived from the traversal stacks; *schema_suffix*, when given, is
    appended to the schema expression to point at the exact schema facet
    that failed (e.g. ".type").
    """
    object_expr = self._get_object_expression()
    # Fold the optional suffix in while building the schema expression.
    schema_expr = self._get_schema_expression() + (schema_suffix or "")
    raise ValidationError(legacy_message, new_message, object_expr,
                          schema_expr)
def _push_additional_property_schema(self):
schema = Schema(self._schema.additionalProperties)
self._push_schema(schema, ".additionalProperties")
def _push_array_schema(self):
schema = Schema(self._schema.items)
self._push_schema(schema, ".items")
def _push_array_item_object(self, index):
self._push_object(self._object[index], "[%d]" % index)
def _push_property_object(self, prop):
self._push_object(self._object[prop], "." + prop)
def _report_unsupported(self):
schema = self._schema
if schema.contentEncoding is not None:
raise NotImplementedError("contentEncoding is not supported")
if schema.divisibleBy != 1:
raise NotImplementedError("divisibleBy is not supported")
if schema.disallow is not None:
raise NotImplementedError("disallow is not supported")
def _validate_type(self):
schema = self._schema
json_type = schema.type
if json_type == "any":
return
obj = self._object
if json_type == "boolean":
# Bool is special cased because in python there is no
# way to test for isinstance(something, bool) that would
# not catch isinstance(1, bool) :/
if obj is not True and obj is not False:
self._report_error(
"{obj!r} does not match type {type!r}".format(
obj=obj, type=json_type),
"Object has incorrect type (expected boolean)",
schema_suffix=".type")
elif isinstance(json_type, dict):
# Nested type check. This is pretty odd case. Here we
# don't change our object stack (it's the same object).
self._push_schema(Schema(json_type), ".type")
self._validate()
self._pop_schema()
elif isinstance(json_type, list):
# Alternative type check, here we may match _any_ of the types
# in the list to be considered valid.
json_type_list = json_type
if json_type == []:
return
for index, json_type in enumerate(json_type_list):
# Aww, ugly. The level of packaging around Schema is annoying
self._push_schema(
Schema({'type': json_type}),
".type.%d" % index)
try:
self._validate()
except ValidationError:
# Ignore errors, we just want one thing to match
pass
else:
# We've got a match - break the loop
break
finally:
# Pop the schema regardless of match/mismatch
self._pop_schema()
else:
# We were not interupted (no break) so we did not match
self._report_error(
"{obj!r} does not match any of the types in {type!r}".format(
obj=obj, type=json_type_list),
"Object has incorrect type (multiple types possible)",
schema_suffix=".type")
else:
# Simple type check
if not isinstance(obj, self.JSON_TYPE_MAP[json_type]):
self._report_error(
"{obj!r} does not match type {type!r}".format(
obj=obj, type=json_type),
"Object has incorrect type (expected {type})".format(
type=json_type),
schema_suffix=".type")
def _validate_pattern(self):
ptn = self._schema.pattern
obj = self._object
if ptn is None:
return
if not isinstance(obj, basestring):
return
if re.match(ptn, obj):
return
self._report_error(
"{obj!r} does not match pattern {ptn!r}".format(
obj=obj,ptn=ptn),
"Object does not match pattern (expected {ptn})".format(
ptn=ptn),
schema_suffix=".pattern"
)
def _validate_format(self):
    """Validate the current object against the schema ``format`` hint.

    Supported formats:
      * ``date-time`` -- value must parse as ``%Y-%m-%dT%H:%M:%SZ``
      * ``regex``     -- value must compile as a regular expression

    :raises NotImplementedError: for any other format name.
    """
    fmt = self._schema.format
    obj = self._object
    if fmt is None:
        return
    if fmt == 'date-time':
        try:
            DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
            datetime.datetime.strptime(obj, DATE_TIME_FORMAT)
        except ValueError:
            self._report_error(
                "{obj!r} is not a string representing JSON date-time".format(
                    obj=obj),
                "Object is not a string representing JSON date-time",
                schema_suffix=".format")
    elif fmt == 'regex':
        try:
            re.compile(obj)
        # BUG FIX: this was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  Catch only what re.compile
        # raises for a bad value: re.error for an invalid pattern,
        # TypeError for a non-string object.
        except (re.error, TypeError):
            self._report_error(
                "{obj!r} is not a string representing a regex".format(
                    obj=obj),
                "Object is not a string representing a regex",
                schema_suffix=".format")
    else:
        raise NotImplementedError("format {0!r} is not supported".format(fmt))
def _validate_properties(self):
obj = self._object
schema = self._schema
assert isinstance(obj, dict)
for prop in schema.properties.keys():
self._push_property_schema(prop)
if prop in obj:
self._push_property_object(prop)
self._validate()
self._pop_object()
else:
if not self._schema.optional:
self._report_error(
"{obj!r} does not have property {prop!r}".format(
obj=obj, prop=prop),
"Object lacks property {prop!r}".format(
prop=prop),
schema_suffix=".optional")
self._pop_schema()
def _validate_additional_properties(self):
obj = self._object
assert isinstance(obj, dict)
if self._schema.additionalProperties is False:
# Additional properties are disallowed
# Report exception for each unknown property
for prop in obj.keys():
if prop not in self._schema.properties:
self._report_error(
"{obj!r} has unknown property {prop!r} and"
" additionalProperties is false".format(
obj=obj, prop=prop),
"Object has unknown property {prop!r} but"
" additional properties are disallowed".format(
prop=prop),
schema_suffix=".additionalProperties")
else:
# Check each property against this object
self._push_additional_property_schema()
for prop in obj.keys():
self._push_property_object(prop)
self._validate()
self._pop_object()
self._pop_schema()
def _validate_enum(self):
obj = self._object
schema = self._schema
if schema.enum is not None:
for allowed_value in schema.enum:
if obj == allowed_value:
break
else:
self._report_error(
"{obj!r} does not match any value in enumeration"
" {enum!r}".format(obj=obj, enum=schema.enum),
"Object does not match any value in enumeration",
schema_suffix=".enum")
def _validate_length(self):
obj = self._object
schema = self._schema
if schema.minLength is not None:
if len(obj) < schema.minLength:
self._report_error(
"{obj!r} does not meet the minimum length"
" {minLength!r}".format(obj=obj, minLength=schema.minLength),
"Object does not meet the minimum length",
schema_suffix=".minLength")
if schema.maxLength is not None:
if len(obj) > schema.maxLength:
self._report_error(
"{obj!r} exceeds the maximum length"
" {maxLength!r}".format(obj=obj, maxLength=schema.maxLength),
"Object exceeds the maximum length",
schema_suffix=".maxLength")
def _validate_range(self):
obj = self._object
schema = self._schema
if schema.minimum is not None:
if obj < schema.minimum or (obj == schema.minimum and not schema.minimumCanEqual):
self._report_error(
"{obj!r} is less than the minimum"
" {minimum!r}".format(obj=obj, minimum=schema.minimum),
"Object is less than the minimum",
schema_suffix=".minimum")
if schema.maximum is not None:
if obj > schema.maximum or (obj == schema.maximum and not schema.maximumCanEqual):
self._report_error(
"{obj!r} is greater than the maximum"
" {maximum!r}".format(obj=obj, maximum=schema.maximum),
"Object is greater than the maximum",
schema_suffix=".maximum")
def _validate_items(self):
    """Validate a JSON array against items/uniqueItems/minItems/maxItems.

    ``items`` may be a single schema (applied to every element) or a
    list of schemas (applied positionally, with ``additionalProperties``
    supplying the schema for trailing elements).
    """
    obj = self._object
    schema = self._schema
    assert isinstance(obj, list)
    items_schema_json = schema.items
    if items_schema_json == {}:
        # default value, don't do anything
        return
    if isinstance(obj, list) and schema.uniqueItems is True and len(set(obj)) != len(obj):
        # If we want a list of unique items and the length of unique
        # elements is different from the length of the full list
        # then validation fails.
        # This implementation isn't strictly compatible with the specs,
        # because we are not checking unique dicts.
        self._report_error(
            "Repeated items found in {obj!r}".format(obj=obj),
            "Repeated items found in array",
            schema_suffix=".items")
    if schema.minItems is not None:
        if len(obj) < schema.minItems:
            self._report_error(
                # BUG FIX: keyword was ``minimum=`` while the template
                # references {minItems!r} -- format() raised KeyError
                # instead of producing the ValidationError message.
                "{obj!r} has fewer than the minimum number of items"
                " {minItems!r}".format(obj=obj, minItems=schema.minItems),
                "Object has fewer than the minimum number of items",
                schema_suffix=".minItems")
    if schema.maxItems is not None:
        if len(obj) > schema.maxItems:
            self._report_error(
                # BUG FIX: same keyword/template mismatch as minItems above.
                "{obj!r} has more than the maximum number of items"
                " {maxItems!r}".format(obj=obj, maxItems=schema.maxItems),
                "Object has more than the maximum number of items",
                schema_suffix=".maxItems")
    if isinstance(items_schema_json, dict):
        # Single schema: every element must match it.
        self._push_array_schema()
        for index, item in enumerate(obj):
            self._push_array_item_object(index)
            self._validate()
            self._pop_object()
        self._pop_schema()
    elif isinstance(items_schema_json, list):
        if len(obj) < len(items_schema_json):
            # If our data array is shorter than the schema then
            # validation fails. Longer arrays are okay (during this
            # step) as they are validated based on
            # additionalProperties schema
            self._report_error(
                "{obj!r} is shorter than array schema {schema!r}".
                format(obj=obj, schema=items_schema_json),
                "Object array is shorter than schema array",
                schema_suffix=".items")
        if len(obj) != len(items_schema_json) and schema.additionalProperties is False:
            # If our array is not exactly the same size as the
            # schema and additional properties are disallowed then
            # validation fails
            self._report_error(
                "{obj!r} is not of the same length as array schema"
                " {schema!r} and additionalProperties is"
                " false".format(obj=obj, schema=items_schema_json),
                "Object array is not of the same length as schema array",
                schema_suffix=".items")
        # Validate each array element using schema for the
        # corresponding array index, fill missing values (since
        # there may be more items in our array than in the schema)
        # with additionalProperties which by now is not False
        for index, (item, item_schema_json) in enumerate(
                zip_longest(
                    obj, items_schema_json,
                    fillvalue=schema.additionalProperties)):
            item_schema = Schema(item_schema_json)
            if index < len(items_schema_json):
                # NOTE(review): this fragment lacks the leading "." used
                # by every other push -- looks like a cosmetic defect in
                # the reported schema expression; left unchanged to keep
                # error strings stable.
                self._push_schema(item_schema, "items[%d]" % index)
            else:
                self._push_schema(item_schema, ".additionalProperties")
            self._push_array_item_object(index)
            self._validate()
            self._pop_schema()
            self._pop_object()
def _validate_requires(self):
obj = self._object
schema = self._schema
requires_json = schema.requires
if requires_json == {}:
# default value, don't do anything
return
# Find our enclosing object in the object stack
if len(self._object_stack) < 2:
self._report_error(
"{obj!r} requires that enclosing object matches"
" schema {schema!r} but there is no enclosing"
" object".format(obj=obj, schema=requires_json),
"Object has no enclosing object that matches schema",
schema_suffix=".requires")
# Note: Parent object can be None, (e.g. a null property)
parent_obj = self._object_stack[-2][0]
if isinstance(requires_json, basestring):
# This is a simple property test
if (not isinstance(parent_obj, dict)
or requires_json not in parent_obj):
self._report_error(
"{obj!r} requires presence of property {requires!r}"
" in the same object".format(
obj=obj, requires=requires_json),
"Enclosing object does not have property"
" {prop!r}".format(prop=requires_json),
schema_suffix=".requires")
elif isinstance(requires_json, dict):
# Requires designates a whole schema, the enclosing object
# must match against that schema.
# Here we resort to a small hack. Proper implementation
# would require us to validate the parent object from its
# own context (own list of parent objects). Since doing that
# and restoring the state would be very complicated we just
# instantiate a new validator with a subset of our current
# history here.
sub_validator = Validator()
sub_validator._object_stack = self._object_stack[:-1]
sub_validator._schema_stack = self._schema_stack[:]
sub_validator._push_schema(
Schema(requires_json), ".requires")
sub_validator._validate()
|
llazzaro/analyzerstrategies | analyzerstrategies/periodStrategy.py | PeriodStrategy.increase_and_check_counter | python | def increase_and_check_counter(self):
''' increase counter by one and check whether a period is end '''
self.counter += 1
self.counter %= self.period
if not self.counter:
return True
else:
return False | increase counter by one and check whether a period is end | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/periodStrategy.py#L27-L34 | null | class PeriodStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, config_dict):
    """Build the strategy from *config_dict*.

    Required keys: CONF_STRATEGY_PERIOD (must be >= 1) and CONF_INIT_CASH.
    """
    super(PeriodStrategy, self).__init__("periodStrategy")
    self.config_dict=config_dict
    # NOTE(review): ``assert`` is stripped under ``python -O``; an explicit
    # ValueError would be safer -- left as-is here.
    assert int(config_dict[CONF_STRATEGY_PERIOD]) >= 1
    self.per_amount=max(1, round(int(config_dict[CONF_INIT_CASH]) / 100)) # buy 1/100 per time
    self.period=int(config_dict[CONF_STRATEGY_PERIOD])
    # symbols is populated later (tickUpdate asserts it) -- presumably by
    # the framework; TODO confirm the caller.
    self.symbols=None
    self.counter=0
def tickUpdate(self, tick_dict):
    """Consume a tick batch; buy a fixed slice once per period.

    Only the first configured symbol is traded.  Every ``period`` calls,
    a market BUY worth ``per_amount`` at the current close is placed.
    """
    assert self.symbols
    assert self.symbols[0] in tick_dict.keys()
    symbol=self.symbols[0]
    tick=tick_dict[symbol]
    if self.increase_and_check_counter():
        # Period boundary reached: invest per_amount at the current close.
        self.place_order(Order(account=self.account,
                               action=Action.BUY,
                               is_market=True,
                               symbol=symbol,
                               price=tick.close,
                               share=self.per_amount / float(tick.close)))
|
llazzaro/analyzerstrategies | analyzerstrategies/periodStrategy.py | PeriodStrategy.tickUpdate | python | def tickUpdate(self, tick_dict):
''' consume ticks '''
assert self.symbols
assert self.symbols[0] in tick_dict.keys()
symbol=self.symbols[0]
tick=tick_dict[symbol]
if self.increase_and_check_counter():
self.place_order(Order(account=self.account,
action=Action.BUY,
is_market=True,
symbol=symbol,
price=tick.close,
share=self.per_amount / float(tick.close))) | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/periodStrategy.py#L36-L49 | null | class PeriodStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, config_dict):
super(PeriodStrategy, self).__init__("periodStrategy")
self.config_dict=config_dict
assert int(config_dict[CONF_STRATEGY_PERIOD]) >= 1
self.per_amount=max(1, round(int(config_dict[CONF_INIT_CASH]) / 100)) # buy 1/100 per time
self.period=int(config_dict[CONF_STRATEGY_PERIOD])
self.symbols=None
self.counter=0
def increase_and_check_counter(self):
    """Advance the tick counter and report whether a full period elapsed.

    Returns:
        bool: True when the counter wraps to zero (a period boundary),
        False otherwise.
    """
    self.counter += 1
    self.counter %= self.period
    # The counter is zero exactly once every ``self.period`` calls;
    # return the boolean directly instead of the redundant if/else.
    return self.counter == 0
|
llazzaro/analyzerstrategies | analyzerstrategies/zscorePortfolioStrategy.py | ZscorePortfolioStrategy.__setUpTrakers | python | def __setUpTrakers(self):
''' set symbols '''
for symbol in self.symbols:
self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio) | set symbols | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L35-L38 | null | class ZscorePortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, configDict):
    """Create the strategy from a configuration mapping.

    CONF_START_TRADE_DATE is required; CONF_BUYING_RATIO is optional
    and defaults to 2.
    """
    super(ZscorePortfolioStrategy, self).__init__("zscorePortfolioStrategy")
    self.__trakers={}
    self.startDate=int(configDict.get(CONF_START_TRADE_DATE))
    # dict.get with a default replaces the redundant
    # ``d.get(k) if k in d else default`` membership dance.
    self.buyingRatio=int(configDict.get(CONF_BUYING_RATIO, 2))
def orderExecuted(self, orderDict):
    """Callback for executed orders; forwards each to its symbol tracker.

    *orderDict* maps orderId -> order; only symbols with a registered
    tracker are notified.
    """
    for orderId, order in orderDict.items():
        # Membership test on the dict directly; ``.keys()`` adds nothing.
        if order.symbol in self.__trakers:
            self.__trakers[order.symbol].orderExecuted(orderId)
def tickUpdate(self, tickDict):
    """Consume a batch of ticks, forwarding each to its symbol tracker."""
    if not self.__trakers:
        # Lazily build per-symbol trackers on the first tick batch.
        self.__setUpTrakers()
    for symbol, tick in tickDict.items():
        tracker = self.__trakers.get(symbol)
        if tracker is not None:
            tracker.tickUpdate(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/zscorePortfolioStrategy.py | ZscorePortfolioStrategy.orderExecuted | python | def orderExecuted(self, orderDict):
''' call back for executed order '''
for orderId, order in orderDict.items():
if order.symbol in self.__trakers.keys():
self.__trakers[order.symbol].orderExecuted(orderId) | call back for executed order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L40-L44 | null | class ZscorePortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, configDict):
''' constructor '''
super(ZscorePortfolioStrategy, self).__init__("zscorePortfolioStrategy")
self.__trakers={}
self.startDate=int(configDict.get(CONF_START_TRADE_DATE))
self.buyingRatio=int(configDict.get(CONF_BUYING_RATIO) if CONF_BUYING_RATIO in configDict else 2)
def __setUpTrakers(self):
    """Create one OneTraker per configured symbol."""
    for sym in self.symbols:
        self.__trakers[sym] = OneTraker(sym, self, self.buyingRatio)
def tickUpdate(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for symbol, tick in tickDict.items():
if symbol in self.__trakers:
self.__trakers[symbol].tickUpdate(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/zscorePortfolioStrategy.py | ZscorePortfolioStrategy.tickUpdate | python | def tickUpdate(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for symbol, tick in tickDict.items():
if symbol in self.__trakers:
self.__trakers[symbol].tickUpdate(tick) | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L46-L53 | null | class ZscorePortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, configDict):
''' constructor '''
super(ZscorePortfolioStrategy, self).__init__("zscorePortfolioStrategy")
self.__trakers={}
self.startDate=int(configDict.get(CONF_START_TRADE_DATE))
self.buyingRatio=int(configDict.get(CONF_BUYING_RATIO) if CONF_BUYING_RATIO in configDict else 2)
def __setUpTrakers(self):
''' set symbols '''
for symbol in self.symbols:
self.__trakers[symbol]=OneTraker(symbol, self, self.buyingRatio)
def orderExecuted(self, orderDict):
''' call back for executed order '''
for orderId, order in orderDict.items():
if order.symbol in self.__trakers.keys():
self.__trakers[order.symbol].orderExecuted(orderId)
|
llazzaro/analyzerstrategies | analyzerstrategies/zscorePortfolioStrategy.py | OneTraker.__getCashToBuyStock | python | def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0 | calculate the amount of money to buy stock | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L74-L81 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, symbol, strategy, buyingRatio):
''' constructor '''
self.__symbol=symbol
self.__strategy=strategy
self.__startDate=strategy.startDate
self.__buyingRatio=buyingRatio
self.__buyThreshold=-2
self.__sellThreshold=0.5
self.__priceZscore=ZScore(120)
self.__volumeZscore=ZScore(120)
self.__toSell=False
self.__toBuy=False
# order id
self.__position=0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0 or self.__position > 0:
return
share=math.floor(cash / float(tick.close)) - self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
symbol=self.__symbol,
share=share)
if self.__strategy.placeOrder(order):
self.__position=math.floor(cash / float(tick.close))
def __placeSellOrder(self, tick):
''' place sell order '''
if self.__position < 0:
return
share=self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_market=True,
symbol=self.__symbol,
share=-share)
if self.__strategy.placeOrder(order):
self.__position=0
def orderExecuted(self, orderId):
''' call back for executed order '''
return
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__symbol, tick.time, tick.close))
self.__priceZscore(tick.close)
self.__volumeZscore(tick.volume)
# if haven't started, don't do any trading
if tick.time <= self.__startDate:
return
# if not enough data, skip to reduce risk
if not self.__priceZscore.getLastValue() or not self.__volumeZscore.getLastValue():
return
# get zscore
priceZscore=self.__priceZscore.getLastValue()
volumeZscore=self.__volumeZscore.getLastValue()
if priceZscore is None or volumeZscore is None:
return
if priceZscore < self.__buyThreshold and self.__position <= 0 and abs(volumeZscore) > 1:
self.__placeBuyOrder(tick)
elif priceZscore > self.__sellThreshold and self.__position > 0 and abs(volumeZscore) > 1:
self.__placeSellOrder(tick)
"""
if self.__toBuy:
self.__placeBuyOrder(tick)
self.__toBuy=False
return
if self.__toSell:
self.__placeSellOrder(tick)
self.__toSell=False
return
if priceZscore < (-self.__threshold) and not self.__buyOrder and abs(volumeZscore) > 1.5:
self.__toBuy=True
elif self.__buyOrder and priceZscore > 0.5:
self.__toSell=True
"""
|
llazzaro/analyzerstrategies | analyzerstrategies/zscorePortfolioStrategy.py | OneTraker.tickUpdate | python | def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__symbol, tick.time, tick.close))
self.__priceZscore(tick.close)
self.__volumeZscore(tick.volume)
# if haven't started, don't do any trading
if tick.time <= self.__startDate:
return
# if not enough data, skip to reduce risk
if not self.__priceZscore.getLastValue() or not self.__volumeZscore.getLastValue():
return
# get zscore
priceZscore=self.__priceZscore.getLastValue()
volumeZscore=self.__volumeZscore.getLastValue()
if priceZscore is None or volumeZscore is None:
return
if priceZscore < self.__buyThreshold and self.__position <= 0 and abs(volumeZscore) > 1:
self.__placeBuyOrder(tick)
elif priceZscore > self.__sellThreshold and self.__position > 0 and abs(volumeZscore) > 1:
self.__placeSellOrder(tick)
"""
if self.__toBuy:
self.__placeBuyOrder(tick)
self.__toBuy=False
return
if self.__toSell:
self.__placeSellOrder(tick)
self.__toSell=False
return
if priceZscore < (-self.__threshold) and not self.__buyOrder and abs(volumeZscore) > 1.5:
self.__toBuy=True
elif self.__buyOrder and priceZscore > 0.5:
self.__toSell=True
""" | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscorePortfolioStrategy.py#L116-L157 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, symbol, strategy, buyingRatio):
''' constructor '''
self.__symbol=symbol
self.__strategy=strategy
self.__startDate=strategy.startDate
self.__buyingRatio=buyingRatio
self.__buyThreshold=-2
self.__sellThreshold=0.5
self.__priceZscore=ZScore(120)
self.__volumeZscore=ZScore(120)
self.__toSell=False
self.__toBuy=False
# order id
self.__position=0
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0 or self.__position > 0:
return
share=math.floor(cash / float(tick.close)) - self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
symbol=self.__symbol,
share=share)
if self.__strategy.placeOrder(order):
self.__position=math.floor(cash / float(tick.close))
def __placeSellOrder(self, tick):
''' place sell order '''
if self.__position < 0:
return
share=self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_market=True,
symbol=self.__symbol,
share=-share)
if self.__strategy.placeOrder(order):
self.__position=0
def orderExecuted(self, orderId):
''' call back for executed order '''
return
|
llazzaro/analyzerstrategies | analyzerstrategies/zscoreMomentumPortfolioStrategy.py | OneTraker.__placeBuyOrder | python | def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
order=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
symbol=self.__symbol,
share=share)
if self.__strategy.placeOrder(order):
self.__position=share
self.__buyPrice=tick.close | place buy order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscoreMomentumPortfolioStrategy.py#L85-L99 | [
"def __getCashToBuyStock(self):\n ''' calculate the amount of money to buy stock '''\n account=self.__strategy.getAccountCopy()\n\n if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):\n return account.getTotalValue() / self.__buyingRatio\n else:\n return 0\n"
] | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, symbol, strategy, buyingRatio):
''' constructor '''
self.__symbol=symbol
self.__strategy=strategy
self.__startDate=strategy.startDate
self.__buyingRatio=buyingRatio
self.__buyThreshold=1.5
self.__sellThreshold=0.5
self.__preZscore=None
self.__priceZscore=ZScore(150)
self.__volumeZscore=ZScore(150)
self.__dayCounter=0
self.__dayCounterThreshold=5
# order id
self.__position=0
self.__buyPrice=0
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0
def __placeSellOrder(self, tick):
''' place sell order '''
if self.__position < 0:
return
share=self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_market=True,
symbol=self.__symbol,
share=-share)
if self.__strategy.placeOrder(order):
self.__position=0
self.__buyPrice=0
def orderExecuted(self, orderId):
''' call back for executed order '''
return
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__symbol, tick.time, tick.close))
self.__priceZscore(tick.close)
self.__volumeZscore(tick.volume)
# get zscore
priceZscore=self.__priceZscore.getLastValue()
volumeZscore=self.__volumeZscore.getLastValue()
# if haven't started, don't do any trading
if tick.time <= self.__startDate:
return
# if not enough data, skip to reduce risk
if priceZscore is None or volumeZscore is None:
return
if self.__position > 0:
self.__dayCounter += 1
if priceZscore > self.__buyThreshold and self.__preZscore and self.__preZscore < self.__buyThreshold and self.__position <= 0 and abs(volumeZscore) > 1:
self.__placeBuyOrder(tick)
elif self.__position > 0:
if (self.__dayCounter > self.__dayCounterThreshold and priceZscore < self.__sellThreshold)\
or priceZscore < 0 or self.__buyPrice * 0.9 > tick.close:
self.__placeSellOrder(tick)
self.__dayCounter=0
self.__preZscore=priceZscore
|
llazzaro/analyzerstrategies | analyzerstrategies/zscoreMomentumPortfolioStrategy.py | OneTraker.__placeSellOrder | python | def __placeSellOrder(self, tick):
''' place sell order '''
if self.__position < 0:
return
share=self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_market=True,
symbol=self.__symbol,
share=-share)
if self.__strategy.placeOrder(order):
self.__position=0
self.__buyPrice=0 | place sell order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscoreMomentumPortfolioStrategy.py#L101-L114 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, symbol, strategy, buyingRatio):
''' constructor '''
self.__symbol=symbol
self.__strategy=strategy
self.__startDate=strategy.startDate
self.__buyingRatio=buyingRatio
self.__buyThreshold=1.5
self.__sellThreshold=0.5
self.__preZscore=None
self.__priceZscore=ZScore(150)
self.__volumeZscore=ZScore(150)
self.__dayCounter=0
self.__dayCounterThreshold=5
# order id
self.__position=0
self.__buyPrice=0
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
order=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
symbol=self.__symbol,
share=share)
if self.__strategy.placeOrder(order):
self.__position=share
self.__buyPrice=tick.close
def orderExecuted(self, orderId):
''' call back for executed order '''
return
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__symbol, tick.time, tick.close))
self.__priceZscore(tick.close)
self.__volumeZscore(tick.volume)
# get zscore
priceZscore=self.__priceZscore.getLastValue()
volumeZscore=self.__volumeZscore.getLastValue()
# if haven't started, don't do any trading
if tick.time <= self.__startDate:
return
# if not enough data, skip to reduce risk
if priceZscore is None or volumeZscore is None:
return
if self.__position > 0:
self.__dayCounter += 1
if priceZscore > self.__buyThreshold and self.__preZscore and self.__preZscore < self.__buyThreshold and self.__position <= 0 and abs(volumeZscore) > 1:
self.__placeBuyOrder(tick)
elif self.__position > 0:
if (self.__dayCounter > self.__dayCounterThreshold and priceZscore < self.__sellThreshold)\
or priceZscore < 0 or self.__buyPrice * 0.9 > tick.close:
self.__placeSellOrder(tick)
self.__dayCounter=0
self.__preZscore=priceZscore
|
llazzaro/analyzerstrategies | analyzerstrategies/zscoreMomentumPortfolioStrategy.py | OneTraker.tickUpdate | python | def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__symbol, tick.time, tick.close))
self.__priceZscore(tick.close)
self.__volumeZscore(tick.volume)
# get zscore
priceZscore=self.__priceZscore.getLastValue()
volumeZscore=self.__volumeZscore.getLastValue()
# if haven't started, don't do any trading
if tick.time <= self.__startDate:
return
# if not enough data, skip to reduce risk
if priceZscore is None or volumeZscore is None:
return
if self.__position > 0:
self.__dayCounter += 1
if priceZscore > self.__buyThreshold and self.__preZscore and self.__preZscore < self.__buyThreshold and self.__position <= 0 and abs(volumeZscore) > 1:
self.__placeBuyOrder(tick)
elif self.__position > 0:
if (self.__dayCounter > self.__dayCounterThreshold and priceZscore < self.__sellThreshold)\
or priceZscore < 0 or self.__buyPrice * 0.9 > tick.close:
self.__placeSellOrder(tick)
self.__dayCounter=0
self.__preZscore=priceZscore | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/zscoreMomentumPortfolioStrategy.py#L120-L149 | [
"def __placeBuyOrder(self, tick):\n ''' place buy order'''\n cash=self.__getCashToBuyStock()\n if cash == 0:\n return\n\n share=math.floor(cash / float(tick.close))\n order=Order(accountId=self.__strategy.accountId,\n action=Action.BUY,\n is_market=True,\n symbol=self.__symbol,\n share=share)\n if self.__strategy.placeOrder(order):\n self.__position=share\n self.__buyPrice=tick.close\n",
"def __placeSellOrder(self, tick):\n ''' place sell order '''\n if self.__position < 0:\n return\n\n share=self.__position\n order=Order(accountId=self.__strategy.accountId,\n action=Action.SELL,\n is_market=True,\n symbol=self.__symbol,\n share=-share)\n if self.__strategy.placeOrder(order):\n self.__position=0\n self.__buyPrice=0\n"
] | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, symbol, strategy, buyingRatio):
''' constructor '''
self.__symbol=symbol
self.__strategy=strategy
self.__startDate=strategy.startDate
self.__buyingRatio=buyingRatio
self.__buyThreshold=1.5
self.__sellThreshold=0.5
self.__preZscore=None
self.__priceZscore=ZScore(150)
self.__volumeZscore=ZScore(150)
self.__dayCounter=0
self.__dayCounterThreshold=5
# order id
self.__position=0
self.__buyPrice=0
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.buyingPower >= account.getTotalValue() / self.__buyingRatio):
return account.getTotalValue() / self.__buyingRatio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
order=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
symbol=self.__symbol,
share=share)
if self.__strategy.placeOrder(order):
self.__position=share
self.__buyPrice=tick.close
def __placeSellOrder(self, tick):
''' place sell order '''
if self.__position < 0:
return
share=self.__position
order=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_market=True,
symbol=self.__symbol,
share=-share)
if self.__strategy.placeOrder(order):
self.__position=0
self.__buyPrice=0
def orderExecuted(self, orderId):
''' call back for executed order '''
return
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | SMAPortfolioStrategy.__setUpTrakers | python | def __setUpTrakers(self):
''' set securities '''
for security in self.securities:
self.__trakers[security]=OneTraker(security, self, self.buying_ratio) | set securities | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L48-L51 | null | class SMAPortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, account, config, securities, store):
''' constructor '''
super(SMAPortfolioStrategy, self).__init__("smaPortfolioStrategy", account)
self.securities = securities
self.__trakers={}
self.start_date=datetime.strptime(config.get('analyzer', CONF_START_TRADE_DATE), '%d/%m/%Y').date()
self.buying_ratio=int(config.get('analyzer', CONF_BUYING_RATIO) or 25)
def order_executed(self, orderDict):
''' call back for executed order '''
for orderId, order in orderDict.items():
if order.security in self.__trakers.keys():
self.__trakers[order.security].orderExecuted(orderId)
def update(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for security, tick in tickDict.items():
if security in self.__trakers:
self.__trakers[security].tickUpdate(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | SMAPortfolioStrategy.order_executed | python | def order_executed(self, orderDict):
''' call back for executed order '''
for orderId, order in orderDict.items():
if order.security in self.__trakers.keys():
self.__trakers[order.security].orderExecuted(orderId) | call back for executed order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L53-L57 | null | class SMAPortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, account, config, securities, store):
''' constructor '''
super(SMAPortfolioStrategy, self).__init__("smaPortfolioStrategy", account)
self.securities = securities
self.__trakers={}
self.start_date=datetime.strptime(config.get('analyzer', CONF_START_TRADE_DATE), '%d/%m/%Y').date()
self.buying_ratio=int(config.get('analyzer', CONF_BUYING_RATIO) or 25)
def __setUpTrakers(self):
''' set securities '''
for security in self.securities:
self.__trakers[security]=OneTraker(security, self, self.buying_ratio)
def update(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for security, tick in tickDict.items():
if security in self.__trakers:
self.__trakers[security].tickUpdate(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | SMAPortfolioStrategy.update | python | def update(self, tickDict):
''' consume ticks '''
if not self.__trakers:
self.__setUpTrakers()
for security, tick in tickDict.items():
if security in self.__trakers:
self.__trakers[security].tickUpdate(tick) | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L59-L66 | null | class SMAPortfolioStrategy(BaseStrategy):
''' period strategy '''
def __init__(self, account, config, securities, store):
''' constructor '''
super(SMAPortfolioStrategy, self).__init__("smaPortfolioStrategy", account)
self.securities = securities
self.__trakers={}
self.start_date=datetime.strptime(config.get('analyzer', CONF_START_TRADE_DATE), '%d/%m/%Y').date()
self.buying_ratio=int(config.get('analyzer', CONF_BUYING_RATIO) or 25)
def __setUpTrakers(self):
''' set securities '''
for security in self.securities:
self.__trakers[security]=OneTraker(security, self, self.buying_ratio)
def order_executed(self, orderDict):
''' call back for executed order '''
for orderId, order in orderDict.items():
if order.security in self.__trakers.keys():
self.__trakers[order.security].orderExecuted(orderId)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__buyIfMeet | python | def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick) | place buy order if conditions meet | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L102-L129 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__placeSellShortOrder | python | def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder) | place short sell order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L131-L150 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__getCashToBuyStock | python | def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0 | calculate the amount of money to buy stock | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L152-L158 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__placeBuyOrder | python | def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder) | place buy order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L160-L182 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__placeStopOrder | python | def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order) | place stop order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L184-L191 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.orderExecuted | python | def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder() | call back for executed order | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L197-L202 | [
"def __clearStopOrder(self):\n ''' clear stop order status '''\n self.__stopOrderId=None\n self.__stopOrder=None\n"
] | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__adjustStopOrder | python | def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
''' | update stop order if needed | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L209-L243 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.__updatePreviousState | python | def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue() | update previous state | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L245-L254 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick)
|
llazzaro/analyzerstrategies | analyzerstrategies/sma_portfolio_strategy.py | OneTraker.tickUpdate | python | def tickUpdate(self, tick):
''' consume ticks '''
LOG.debug("tickUpdate %s with tick %s, price %s" % (self.__security, tick.time, tick.close))
# update sma
self.__smaShort(tick.close)
self.__smaMid(tick.close)
self.__smaLong(tick.close)
self.__smaVolumeShort(tick.volume)
self.__smaVolumeMid(tick.volume)
self.__movingLowShort(tick.close)
self.__movingLowWeek(tick.close)
# if not enough data, skip to reduce risk -- SKIP NEWLY IPOs
if not self.__smaLong.getLastValue() or not self.__smaMid.getLastValue() or not self.__smaShort.getLastValue():
self.__updatePreviousState(tick)
return
# if haven't started, don't do any trading
if tick.time <= self.start_date:
return
# already have some holdings
if self.__stopOrderId:
self.__sellIfMeet(tick)
self.__adjustStopOrder(tick)
# don't have any holdings
if not self.__stopOrderId and self.__getCashToBuyStock():
self.__buyIfMeet(tick)
self.__updatePreviousState(tick) | consume ticks | train | https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/sma_portfolio_strategy.py#L256-L286 | null | class OneTraker(object):
''' tracker for one stock '''
def __init__(self, security, strategy, buying_ratio):
''' constructor '''
self.__security=security
self.__strategy=strategy
self.start_date=strategy.start_date
self.buying_ratio=buying_ratio
# order id
self.__stopOrderId=None
self.__stopOrder=None
self.__buyOrder=None
self.__smaShort=Sma(10)
self.__smaMid=Sma(60)
self.__smaLong=Sma(200)
self.__smaVolumeShort=Sma(10)
self.__smaVolumeMid=Sma(60)
self.__movingLowShort=MovingLow(10)
self.__movingLowWeek=MovingLow(3)
# state of previous day
self.__previousTick=None
self.__previousSmaShort=None
self.__previousMovingLowShort=None
self.__previousMovingLowWeek=None
self.__previousSmaMid=None
self.__previousSmaLong=None
self.__previousSmaVolumeShort=None
self.__previousSmaVolumeMid=None
def __buyIfMeet(self, tick):
''' place buy order if conditions meet '''
# place short sell order
'''
if (self.__smaShort.getLastValue() < self.__smaLong.getLastValue() or self.__smaMid.getLastValue() < self.__smaLong.getLastValue()):
if tick.close/self.__previousMovingLowWeek < 0.95:
return
if self.__previousSmaShort > self.__previousSmaLong and self.__smaShort.getLastValue() < self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort/1.1):
# assume no commission fee for now
self.__placeSellShortOrder(tick)
elif self.__previousSmaLong > self.__previousSmaShort > self.__previousSmaMid and self.__smaLong.getLastValue() > self.__smaMid.getLastValue() > self.__smaShort.getLastValue():
# assume no commission fee for now
self.__placeSellShortOrder(tick)
'''
# place buy order
if (self.__smaShort.getLastValue() > self.__smaLong.getLastValue() or self.__smaMid.getLastValue() > self.__smaLong.getLastValue()):
if tick.close / self.__previousMovingLowWeek > 1.05:
return
if self.__previousSmaShort < self.__previousSmaLong and self.__smaShort.getLastValue() > self.__smaLong.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
elif self.__previousSmaLong < self.__previousSmaShort < self.__previousSmaMid and self.__smaLong.getLastValue() < self.__smaMid.getLastValue() < self.__smaShort.getLastValue() and self.__previousSmaVolumeMid < (self.__previousSmaVolumeShort / 1.1):
# assume no commission fee for now
self.__placeBuyOrder(tick)
def __placeSellShortOrder(self, tick):
''' place short sell order'''
share=math.floor(self.__strategy.getAccountCopy().getCash() / float(tick.close))
sellShortOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL_SHORT,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(sellShortOrder):
self.__buyOrder=sellShortOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
is_stop=True,
security=self.__security,
price=tick.close * 1.05,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __getCashToBuyStock(self):
''' calculate the amount of money to buy stock '''
account=self.__strategy.getAccountCopy()
if (account.getCash() >= account.getTotalValue() / self.buying_ratio):
return account.getTotalValue() / self.buying_ratio
else:
return 0
def __placeBuyOrder(self, tick):
''' place buy order'''
cash=self.__getCashToBuyStock()
if cash == 0:
return
share=math.floor(cash / float(tick.close))
buyOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY,
is_market=True,
security=self.__security,
share=share)
if self.__strategy.placeOrder(buyOrder):
self.__buyOrder=buyOrder
# place stop order
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=tick.close * 0.95,
share=0 - share)
self.__placeStopOrder(stopOrder)
def __placeStopOrder(self, order):
''' place stop order '''
orderId=self.__strategy.placeOrder(order)
if orderId:
self.__stopOrderId=orderId
self.__stopOrder=order
else:
LOG.error("Can't place stop order %s" % order)
def __sellIfMeet(self, tick):
''' place sell order if conditions meet '''
pass
def orderExecuted(self, orderId):
''' call back for executed order '''
if orderId == self.__stopOrderId:
LOG.debug("smaStrategy stop order canceled %s" % orderId)
# stop order executed
self.__clearStopOrder()
def __clearStopOrder(self):
''' clear stop order status '''
self.__stopOrderId=None
self.__stopOrder=None
def __adjustStopOrder(self, tick):
''' update stop order if needed '''
if not self.__stopOrderId:
return
if self.__stopOrder.action == Action.SELL:
orgStopPrice=self.__buyOrder.price * 0.95
newStopPrice=max(((tick.close + orgStopPrice) / 2), tick.close * 0.85)
newStopPrice=min(newStopPrice, tick.close * 0.95)
if newStopPrice > self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.SELL,
is_stop=True,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
elif self.__stopOrder.action == Action.BUY_TO_COVER:
orgStopPrice=self.__buyOrder.price * 1.05
newStopPrice=min(((orgStopPrice + tick.close) / 2), tick.close * 1.15)
newStopPrice=max(newStopPrice, tick.close * 1.05)
if newStopPrice < self.__stopOrder.price:
self.__strategy.tradingEngine.cancelOrder(self.__security, self.__stopOrderId)
stopOrder=Order(accountId=self.__strategy.accountId,
action=Action.BUY_TO_COVER,
type=Type.STOP,
security=self.__security,
price=newStopPrice,
share=self.__stopOrder.share)
self.__placeStopOrder(stopOrder)
'''
def __updatePreviousState(self, tick):
''' update previous state '''
self.__previousTick=tick
self.__previousSmaShort=self.__smaShort.getLastValue()
self.__previousSmaMid=self.__smaMid.getLastValue()
self.__previousSmaLong=self.__smaLong.getLastValue()
self.__previousSmaVolumeShort=self.__smaVolumeShort.getLastValue()
self.__previousSmaVolumeMid=self.__smaVolumeMid.getLastValue()
self.__previousMovingLowShort=self.__movingLowShort.getLastValue()
self.__previousMovingLowWeek=self.__movingLowWeek.getLastValue()
|
predicador37/pyjstat | pyjstat/pyjstat.py | check_version_2 | python | def check_version_2(dataset):
if float(dataset.get('version')) >= 2.0 \
if dataset.get('version') else False:
return True
else:
return False | Checks if json-stat version attribute exists and is equal or greater \
than 2.0 for a given dataset.
Args:
dataset (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
Returns:
bool: True if version exists and is equal or greater than 2.0, \
False otherwise. For datasets without the version attribute, \
always return False. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L99-L119 | null | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
"""Custom JSON encoder class for Numpy data types.
"""
def default(self, obj):
if isinstance(obj, np.integer) or isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
def to_int(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
return int(variable)
except ValueError:
return variable
def to_str(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
int(variable)
return str(variable)
except ValueError:
return variable
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list)
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
"""Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data.
"""
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label
def get_dim_index(js_dict, dim):
"""Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
"""
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_index = dim_index.sort_index(by='index')
return dim_index
def get_values(js_dict, value='value'):
"""Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values.
"""
values = js_dict[value]
if type(values) is list:
if type(values[0]) is not dict or tuple:
return values
# being not a list of dicts or tuples leaves us with a dict...
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
max_val = np.prod(np.array((js_dict['size'])))
else:
max_val = np.prod(np.array((js_dict['dimension']['size'])))
vals = max_val * [None]
for (key, value) in values.items():
vals[key] = value
values = vals
return values
def get_df_row(dimensions, naming='label', i=0, record=None):
"""Generate row dimension values for a pandas dataframe.
Args:
dimensions (list): list of pandas dataframes with dimension labels \
generated by get_dim_label or get_dim_index methods.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
i (int): dimension list iteration index. Default is 0, it's used in the \
recursive calls to the method.
record (list): list of values representing a pandas dataframe row, \
except for the value column. Default is empty, it's used \
in the recursive calls to the method.
Yields:
list: list with pandas dataframe column values except for value column
"""
check_input(naming)
if i == 0 or record is None:
record = []
for dimension in dimensions[i][naming]:
record.append(dimension)
if len(record) == len(dimensions):
yield record
if i + 1 < len(dimensions):
for row in get_df_row(dimensions, naming, i + 1, record):
yield row
if len(record) == i + 1:
record.pop()
def uniquify(seq):
"""Return unique values in a list in the original order. See: \
http://www.peterbe.com/plog/uniqifiers-benchmark
Args:
seq (list): original list.
Returns:
list: list without duplicates preserving original order.
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def generate_df(js_dict, naming, value="value"):
"""Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id.'
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data.
"""
values = []
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict, value=value)
output = pd.DataFrame([category + [values[i]]
for i, category in
enumerate(get_df_row(dimensions, naming))])
output.columns = dim_names + [value]
output.index = range(0, len(values))
return output
def from_json_stat(datasets, naming='label', value='value'):
"""Decode JSON-stat formatted data into pandas.DataFrame object.
Args:
datasets(OrderedDict, list): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(), for example.\
Both List and OrderedDict are accepted \
as inputs.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id'.Defaults to 'label'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
results(list): list of pandas.DataFrame with imported data.
"""
warnings.warn(
"Shouldn't use this function anymore! Now use read() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
for idx, element in enumerate(datasets):
for dataset in element:
js_dict = datasets[idx][dataset]
results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
isinstance(datasets, Dataset):
if 'class' in datasets:
if datasets['class'] == 'dataset':
js_dict = datasets
results.append(generate_df(js_dict, naming, value))
else: # 1.00 bundle type
for dataset in datasets:
js_dict = datasets[dataset]
results.append(generate_df(js_dict, naming, value))
return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data
                                    frames) to encode.
        value (string, optional): name of the value column. Defaults to
                                  'value'.
        output(string): accepts two values: 'list' or 'dict'. Produce list of\
                        dicts or dict of dicts as output.
        version(string): desired json-stat version. 2.0 is preferred now.\
                         Apart from this, only older 1.3 format is accepted,
                         which is the default parameter in order to preserve
                         backwards compatibility.

    Returns:
        output(string): String with JSON-stat object.

    Raises:
        ValueError: if non-value columns do not constitute a unique ID.

    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Dimension columns are all columns except the value column.
        # FIX: the original test was ``item not in value`` — a substring
        # membership test on the value column *name* that wrongly dropped
        # any column whose name is a substring of ``value``.
        dims = dataframe.filter([item for item in dataframe.columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category: {index, label}}} mapping per dimension.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                            {"index":
                             OrderedDict([(to_str(j), to_int(k))
                                          for k, j in enumerate(
                                              uniquify(dims[i]))]),
                             "label":
                                 OrderedDict([(to_str(j), to_str(j))
                                              for k, j in enumerate(
                                                  uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: flat dataset with top-level id/size/value.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: wrap under "datasetN" with id/size nested in
            # the dimension mapping.
            dataset = {"dataset" +
                       str(row + 1):
                           {"dimension": OrderedDict(),
                            value: [None if np.isnan(x) else x
                                    for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in
                                              dims.columns.values]})
        # NOTE: the original re-ran the category update loop a second time
        # in each branch; the second pass re-applied identical data and has
        # been removed.
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.

    """
    try:
        requested_object = requests.get(path,
                                        headers={'Accept':
                                                 'application/json'})
        # Turn 4xx/5xx responses into HTTPError exceptions.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        # Success path: decode the JSON body.
        return requested_object.json()
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    The instance itself is the deserialized JSON-stat mapping (keys such
    as 'dimension', 'value', 'size', 'id'), extended with conversion and
    query helpers.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.

        """
        # Dispatch on input type; order matters: URL-looking strings are
        # fetched, other strings are parsed as JSON text, and any other
        # object is assumed to be a file-like handle for json.load().
        if isinstance(data, pd.DataFrame):
            # Round-trip through to_json_stat to obtain a JSON-stat dict.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'. Default to
                            'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the \
            'output' parameter.

        Raises:
            ValueError: if output is neither 'jsonstat' nor 'dataframe'.

        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # from_json_stat returns a list of dataframes; a Dataset
            # always maps to exactly one.
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the \
        numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            ndx[value](int): index of the category in the dimension.

        """
        # Dimensions without an explicit category index hold a single
        # category, whose index is 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        # The index may be stored as a list (position == index) or as a
        # {category_id: index} mapping.
        if isinstance(ndx, list):
            return ndx.index(value)
        else:
            return ndx[value]

    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of \
        dimensions’ indices.

        Args:
            query(list): dimension/category list of dicts, e.g.
                         [{'country': 'US'}, {'year': '2010'}].

        Returns:
            indices(list): list of dimensions' indices.

        """
        # JSON-stat 2.0 keeps 'id' at the top level; 1.x nests it under
        # 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the category requested for this dimension from the
            # query (assumes each dimension appears in some query dict).
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Converts a list of dimensions’ indices into a numeric value index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index.

        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Row-major linearization (last dimension varies fastest): mult
        # accumulates the product of the trailing dimension sizes while
        # walking the indices from last to first.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            self['value'][index](float): Numeric data value.

        """
        return self['value'][index]

    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value \
        in three steps.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.

        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    The instance itself is the deserialized dimension mapping ('label',
    'category', ...), extended with read/write conversion helpers.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.
                  A dataframe is expected to carry an 'id' column plus
                  exactly one label column (any name but 'id'/'index').

        Returns:
            An object of class Dimension populated with data.

        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension from an id/label dataframe.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # Single-element unpacking enforces that exactly one label
            # column exists besides 'id' and 'index'.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            # URL-looking strings are fetched over the network.
            return cls(request(data))
        elif isinstance(data,basestring):
            # Any other string is parsed as JSON text.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is assumed to be a file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the \
            'output' parameter.

        Raises:
            ValueError: if output is neither 'jsonstat' nor 'dataframe'.

        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # 'dimension' tells get_dim_label that self is a single
            # dimension mapping rather than a whole dataset.
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    The instance itself is the deserialized collection mapping; the items
    it aggregates live under self['link']['item'].
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.

        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://", "ftps://")):
            # URL-looking strings are fetched over the network.
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string is parsed as JSON text.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is assumed to be a file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of \
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'.

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending \
            on the 'output' parameter.

        Raises:
            ValueError: if output is neither 'jsonstat' nor
            'dataframe_list'.

        """
        if output == 'jsonstat':
            return json.dumps(self)
        elif output == 'dataframe_list':
            # Recursively fetch and convert every dataset in the
            # collection, including datasets of nested collections.
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Gets the element-th item of the collection as an object of the
        corresponding class (Dataset, Collection or Dimension), fetched
        from the item's 'href'.

        NOTE(review): this overrides dict.get() with incompatible
        semantics (positional index into self['link']['item'] instead of
        key lookup with a default); callers that need the mapping API
        should use item access instead.

        Args:
            element(int): position of the item inside the collection.

        Returns:
            A Dataset, Collection or Dimension object, depending on the
            item's declared class.

        Raises:
            ValueError: if the item's class is none of the known ones.

        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | unnest_collection | python | def unnest_collection(collection, df_list):
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list) | Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L122-L142 | [
"def request(path):\n \"\"\"Send a request to a given URL accepting JSON format and return a \\\n deserialized Python object.\n\n Args:\n path (str): The URI to be requested.\n\n Returns:\n response: Deserialized JSON Python object.\n\n Raises:\n HTTPError: the HTTP error returned by the requested server.\n InvalidURL: an invalid URL has been requested.\n Exception: generic exception.\n\n \"\"\"\n headers = {'Accept': 'application/json'}\n try:\n requested_object = requests.get(path, headers=headers)\n requested_object.raise_for_status()\n except requests.exceptions.HTTPError as exception:\n LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +\n str(exception.response.status_code) + ' ' +\n str(exception.response.reason) + ' ' + str(path))\n raise\n except requests.exceptions.InvalidURL as exception:\n LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))\n raise\n except Exception:\n import traceback\n LOGGER.error('Generic exception: ' + traceback.format_exc())\n raise\n else:\n response = requested_object.json()\n return response\n",
"def unnest_collection(collection, df_list):\n \"\"\"Unnest collection structure extracting all its datasets and converting \\\n them to Pandas Dataframes.\n\n Args:\n collection (OrderedDict): data in JSON-stat format, previously \\\n deserialized to a python object by \\\n json.load() or json.loads(),\n df_list (list): list variable which will contain the converted \\\n datasets.\n\n Returns:\n Nothing.\n\n \"\"\"\n for item in collection['link']['item']:\n if item['class'] == 'dataset':\n df_list.append(Dataset.read(item['href']).write('dataframe'))\n elif item['class'] == 'collection':\n nested_collection = request(item['href'])\n unnest_collection(nested_collection, df_list)\n",
"def read(cls, data):\n \"\"\"Reads data from URL, Dataframe, JSON string, JSON file or\n OrderedDict.\n Args:\n data: can be a Pandas Dataframe, a JSON file, a JSON string,\n an OrderedDict or a URL pointing to a JSONstat file.\n\n Returns:\n An object of class Dataset populated with data.\n\n \"\"\"\n if isinstance(data, pd.DataFrame):\n return cls((json.loads(\n to_json_stat(data, output='dict', version='2.0'),\n object_pairs_hook=OrderedDict)))\n elif isinstance(data, OrderedDict):\n return cls(data)\n elif (isinstance(data, basestring)\n and data.startswith((\"http://\", \"https://\",\n \"ftp://\", \"ftps://\"))):\n # requests will do the rest...\n return cls(request(data))\n elif isinstance(data, basestring):\n try:\n json_dict = json.loads(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n else:\n try:\n json_dict = json.load(data, object_pairs_hook=OrderedDict)\n return cls(json_dict)\n except ValueError:\n raise\n",
"def write(self, output='jsonstat'):\n \"\"\"Writes data from a Dataset object to JSONstat or Pandas Dataframe.\n Args:\n output(string): can accept 'jsonstat' or 'dataframe'. Default to\n 'jsonstat'.\n\n Returns:\n Serialized JSONstat or a Pandas Dataframe,depending on the \\\n 'output' parameter.\n\n \"\"\"\n\n if output == 'jsonstat':\n return json.dumps(OrderedDict(self), cls=NumpyEncoder)\n elif output == 'dataframe':\n return from_json_stat(self)[0]\n else:\n raise ValueError(\"Allowed arguments are 'jsonstat' or 'dataframe'\")\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# Python 2/3 compatibility shim: Python 3 removed the ``basestring``
# builtin, so alias it to ``str`` for the isinstance checks used below.
try:
    basestring
except NameError:
    basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalar and array types.

    NumPy integers become Python ints, NumPy floats become Python floats
    and ndarrays become (nested) lists; anything else is delegated to the
    base encoder, which raises TypeError for unknown types.
    """

    def default(self, obj):
        if isinstance(obj, (np.integer, np.int64)):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Convert variable to integer or string depending on the case.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): the int value when the string parses as an
                               integer, otherwise the original variable
                               untouched.

    """
    try:
        converted = int(variable)
    except ValueError:
        # Not an integer literal: hand back the input unchanged.
        return variable
    return converted
def to_str(variable):
    """Convert variable to integer or string depending on the case.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): the str() of the value when it parses as an
                               integer, otherwise the original variable
                               untouched.

    """
    try:
        int(variable)
    except ValueError:
        # Not integer-like: hand back the input unchanged.
        return variable
    return str(variable)
def check_version_2(dataset):
    """Checks if json-stat version attribute exists and is equal or greater \
    than 2.0 for a given dataset.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously \
                               deserialized to a python object by \
                               json.load() or json.loads().

    Returns:
        bool: True if version exists and is equal or greater than 2.0, \
        False otherwise. For datasets without the version attribute, \
        always return False.

    """
    version = dataset.get('version')
    # Short-circuit keeps float() from running on a missing/empty version.
    return bool(version) and float(version) >= 2.0
def check_input(naming):
    """Check and validate input params.

    Args:
        naming (string): a string containing the naming type (label or id).

    Returns:
        Nothing

    Raises:
        ValueError: if the parameter is not in the allowed list.

    """
    allowed_namings = ('label', 'id')
    if naming not in allowed_namings:
        raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Get dimensions from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string, optional): dimension naming. Possible values: 'label' \
                                   or 'id'.

    Returns:
        dimensions (list): list of pandas data frames with dimension \
                           category data.
        dim_names (list): list of strings with dimension names.

    """
    # In JSON-stat >= 2.0 the dimension ids live at the top level; in
    # older versions they are nested under 'dimension'.
    id_source = js_dict if check_version_2(js_dict) else js_dict['dimension']
    dimensions = []
    dim_names = []
    for dim in id_source['id']:
        # The label is read unconditionally (as in the original) so a
        # missing 'label' key fails the same way in both naming modes.
        label = js_dict['dimension'][dim]['label']
        display_name = label if label else dim
        if naming == 'label':
            dimensions.append(get_dim_label(js_dict, dim))
            dim_names.append(display_name)
        else:
            dimensions.append(get_dim_index(js_dict, dim))
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string, optional): 'dataset' when js_dict is a whole
            JSON-stat dataset, 'dimension' when it is a single dimension
            mapping. Defaults to 'dataset'.

    Returns:
        dim_label(pandas.DataFrame): DataFrame with label-based dimension
        data ('id', label column and 'index'), sorted by 'index'.

    Raises:
        ValueError: if input is neither 'dataset' nor 'dimension'.

    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No labels available: fall back to the ids, reused as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # List form: position in the list is the index.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            # Mapping form: {category_id: index}.
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # FIX: sort_values() replaces DataFrame.sort_index(by=...), an API
    # long removed from pandas that raises TypeError on modern versions.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
        data ('id' and 'index' columns), sorted by 'index'.

    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No explicit index: single-category dimension, whose index is 0.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # List form: position in the list is the index.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            # Mapping form: {category_id: index}.
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # FIX: sort_values() replaces DataFrame.sort_index(by=...), an API
        # long removed from pandas that raises TypeError on modern
        # versions.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        values (list): list of dataset values.

    """
    values = js_dict[value]
    if type(values) is list:
        # Lists are returned as-is.  FIX: the original guard
        # ``type(values[0]) is not dict or tuple`` was always True due to
        # operator precedence and raised IndexError on empty lists; an
        # unconditional return preserves behavior and handles the empty
        # case.
        return values
    # Not a list leaves us with a dict of {str(index): value} (JSON-stat
    # sparse value representation): expand it into a dense list padded
    # with None for the missing cells.
    values = {int(key): val for (key, val) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    # FIX: explicit list repetition instead of ``max_val * [None]``, which
    # depended on numpy-scalar * list semantics.
    vals = [None] * int(max_val)
    for (key, val) in values.items():
        vals[key] = val
    values = vals
    return values
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels \
                           generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values: 'label' \
                                   or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used in the \
                 recursive calls to the method.
        record (list): list of values representing a pandas dataframe row, \
                       except for the value column. Default is empty, it's used \
                       in the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value
        column. NOTE: the very same list object is yielded (and later
        mutated) on every iteration, so consumers must copy it right away,
        as generate_df() does via ``category + [values[i]]``.

    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    # Depth-first walk over the cartesian product of all dimension
    # categories, in row-major order (the last dimension varies fastest).
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        # A complete combination has exactly one entry per dimension.
        if len(record) == len(dimensions):
            yield record
        # Recurse into the next dimension while one remains.
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        # Backtrack: drop this dimension's entry before trying the next
        # category at the same depth.
        if len(record) == i + 1:
            record.pop()
def uniquify(seq):
    """Return unique values in a list in the original order. See: \
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.

    """
    # OrderedDict keys keep the first occurrence of each value, in order.
    return list(OrderedDict.fromkeys(seq))
def generate_df(js_dict, naming, value="value"):
    """Decode JSON-stat dict into pandas.DataFrame object. Helper method \
    that should be called inside from_json_stat().

    Args:
        js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
                              previously deserialized into a python object by \
                              json.load() or json.loads(), for example.
        naming(string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        output(DataFrame): pandas.DataFrame with converted data.

    """
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Pair each combination of dimension categories (row-major order) with
    # its corresponding value; the concatenation also copies the shared
    # row list yielded by get_df_row.
    rows = [category + [values[i]]
            for i, category in enumerate(get_df_row(dimensions, naming))]
    output = pd.DataFrame(rows)
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame object.

    Deprecated: prefer the read() methods of Dataset, Collection or
    Dimension.

    Args:
        datasets(OrderedDict, list): data in JSON-stat format, previously \
                                     deserialized to a python object by \
                                     json.load() or json.loads(). Both a \
                                     single mapping and a list of mappings \
                                     are accepted.
        naming(string, optional): dimension naming. Possible values: 'label'
                                  or 'id'. Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        results(list): list of pandas.DataFrame with imported data.

    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # A list of bundles: convert every dataset found in every element.
        for element in datasets:
            results.extend(generate_df(element[key], naming, value)
                           for key in element)
    elif isinstance(datasets, (OrderedDict, Dataset)) or \
            type(datasets) is dict:
        if 'class' in datasets:
            # JSON-stat with a 'class' key: the mapping itself is the
            # dataset (only the 'dataset' class is convertible here).
            if datasets['class'] == 'dataset':
                results.append(generate_df(datasets, naming, value))
        else:  # 1.00 bundle type: one dataset per top-level key
            results.extend(generate_df(datasets[key], naming, value)
                           for key in datasets)
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data
                                    frames) to encode.
        value (string, optional): name of the value column. Defaults to
                                  'value'.
        output(string): accepts two values: 'list' or 'dict'. Produce list of\
                        dicts or dict of dicts as output.
        version(string): desired json-stat version. 2.0 is preferred now.\
                         Apart from this, only older 1.3 format is accepted,
                         which is the default parameter in order to preserve
                         backwards compatibility.

    Returns:
        output(string): String with JSON-stat object.

    Raises:
        ValueError: if non-value columns do not constitute a unique ID.

    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Dimension columns are all columns except the value column.
        # FIX: the original test was ``item not in value`` — a substring
        # membership test on the value column *name* that wrongly dropped
        # any column whose name is a substring of ``value``.
        dims = dataframe.filter([item for item in dataframe.columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category: {index, label}}} mapping per dimension.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                            {"index":
                             OrderedDict([(to_str(j), to_int(k))
                                          for k, j in enumerate(
                                              uniquify(dims[i]))]),
                             "label":
                                 OrderedDict([(to_str(j), to_str(j))
                                              for k, j in enumerate(
                                                  uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: flat dataset with top-level id/size/value.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: wrap under "datasetN" with id/size nested in
            # the dimension mapping.
            dataset = {"dataset" +
                       str(row + 1):
                           {"dimension": OrderedDict(),
                            value: [None if np.isnan(x) else x
                                    for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in
                                              dims.columns.values]})
        # NOTE: the original re-ran the category update loop a second time
        # in each branch; the second pass re-applied identical data and has
        # been removed.
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.

    """
    try:
        requested_object = requests.get(path,
                                        headers={'Accept':
                                                 'application/json'})
        # Turn 4xx/5xx responses into HTTPError exceptions.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        # Success path: decode the JSON body.
        return requested_object.json()
class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
    def get_value_index(self, indices):
        """Convert a list of dimension indices into a flat value index.

        Implements the JSON-stat row-major layout: the last dimension
        varies fastest in the flat 'value' array.

        Args:
            indices (list): numeric category index per dimension.

        Returns:
            int: position within the flat 'value' array.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Walk dimensions from last to first, accumulating the stride
        # (product of the sizes of all later dimensions) as we go.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num
    def get_value_by_index(self, index):
        """Return the data value stored at a flat value index.

        Args:
            index (int): numeric value index into the 'value' array.

        Returns:
            The data value at that position (typically numeric or None).
        """
        return self['value'][index]
    def get_value(self, query):
        """Look up a single data value from a dimension/category query.

        Composes the three lookup steps: query -> per-dimension indices
        -> flat value index -> value.

        Args:
            query (list): list of dicts mapping dimension IDs to the
                desired category IDs.

        Returns:
            The matching data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSON-stat dimension (an ordered mapping)."""

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Build a Dimension from a DataFrame, URL, JSON string, file
        object or OrderedDict.

        Args:
            data: pandas DataFrame (columns: 'id', optional 'index' and
                one label column), OrderedDict, URL string, JSON string,
                or a readable file-like object containing JSON.

        Returns:
            Dimension: populated with the parsed data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension object from the frame.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The single non-id/non-index column is the label column.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            # URL: fetch and deserialize via the module-level helper.
            return cls(request(data))
        elif isinstance(data,basestring):
            # Plain JSON string.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Assume a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Serialize this Dimension to JSON-stat or a pandas DataFrame.

        Args:
            output (string): 'jsonstat' (default) or 'dataframe'.

        Returns:
            str or pandas.DataFrame, depending on *output*.

        Raises:
            ValueError: if *output* is not an allowed value.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # 'dimension' mode tells get_dim_label that *self* is a single
            # dimension object rather than a whole dataset.
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSON-stat collection (an ordered mapping)."""

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Build a Collection from a URL, JSON string, file object or
        OrderedDict.

        Args:
            data: OrderedDict, URL string, JSON string, or a readable
                file-like object containing JSON.

        Returns:
            Collection: populated with the parsed data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://", "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Assume a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Serialize this Collection to JSON-stat or a list of DataFrames.

        Args:
            output (string): 'jsonstat' (default) or 'dataframe_list'.

        Returns:
            str or list of pandas.DataFrame, depending on *output*.

        Raises:
            ValueError: if *output* is not an allowed value.
        """
        if output == 'jsonstat':
            # NOTE(review): unlike Dataset/Dimension.write, this does not
            # pass cls=NumpyEncoder -- confirm whether numpy values can
            # appear here.
            return json.dumps(self)
        elif output == 'dataframe_list':
            df_list = []
            # Recursively converts every nested dataset into a DataFrame.
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Fetch the *element*-th item of the collection as an object of
        the corresponding class.

        NOTE(review): this overrides ``dict.get`` with an incompatible
        signature (positional index instead of key/default).

        Args:
            element (int): index into the collection's link/item list.

        Returns:
            Dataset, Collection or Dimension fetched from the item's href.

        Raises:
            ValueError: if the item's class is not one of the three
            supported kinds.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | get_dimensions | python | def get_dimensions(js_dict, naming):
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names | Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L163-L195 | [
"def check_version_2(dataset):\n \"\"\"Checks if json-stat version attribute exists and is equal or greater \\\n than 2.0 for a given dataset.\n\n Args:\n dataset (OrderedDict): data in JSON-stat format, previously \\\n deserialized to a python object by \\\n json.load() or json.loads(),\n\n Returns:\n bool: True if version exists and is equal or greater than 2.0, \\\n False otherwise. For datasets without the version attribute, \\\n always return False.\n\n \"\"\"\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False\n",
"def get_dim_label(js_dict, dim, input=\"dataset\"):\n \"\"\"Get label from a given dimension.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\n Returns:\n dim_label(pandas.DataFrame): DataFrame with label-based dimension data.\n\n \"\"\"\n\n if input == 'dataset':\n input = js_dict['dimension'][dim]\n label_col = 'label'\n elif input == 'dimension':\n label_col = js_dict['label']\n input = js_dict\n else:\n raise ValueError\n\n try:\n dim_label = input['category']['label']\n\n except KeyError:\n dim_index = get_dim_index(js_dict, dim)\n dim_label = pd.concat([dim_index['id'],\n dim_index['id']],\n axis=1)\n dim_label.columns = ['id', 'label']\n else:\n dim_label = pd.DataFrame(list(zip(dim_label.keys(),\n dim_label.values())),\n index=dim_label.keys(),\n columns=['id', label_col])\n # index must be added to dim label so that it can be sorted\n try:\n dim_index = input['category']['index']\n except KeyError:\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),\n index=[0],\n columns=['id', 'index'])\n else:\n if type(dim_index) is list:\n dim_index = pd.DataFrame(list(zip(dim_index,\n range(0, len(dim_index)))),\n index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(),\n dim_index.values())),\n index=dim_index.keys(),\n columns=['id', 'index'])\n dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')\n return dim_label\n",
"def get_dim_index(js_dict, dim):\n \"\"\"Get index from a given dimension.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\n Returns:\n dim_index (pandas.DataFrame): DataFrame with index-based dimension data.\n\n \"\"\"\n\n try:\n dim_index = js_dict['dimension'][dim]['category']['index']\n except KeyError:\n dim_label = get_dim_label(js_dict, dim)\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),\n index=[0],\n columns=['id', 'index'])\n else:\n if type(dim_index) is list:\n dim_index = pd.DataFrame(list(zip(dim_index,\n range(0, len(dim_index)))),\n index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(),\n dim_index.values())),\n index=dim_index.keys(),\n columns=['id', 'index'])\n dim_index = dim_index.sort_index(by='index')\n return dim_index\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
# Module-level logger. NOTE(review): calling basicConfig at import time
# configures the root logger of the embedding application as a side effect.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
# Python 2/3 compatibility: alias 'basestring' to 'str' on Python 3,
# where the name does not exist.
try:
    basestring
except NameError:
    basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalar and array types.

    ``json.dumps`` cannot handle NumPy types natively; this encoder maps
    them to the equivalent built-in Python types before encoding.
    """

    def default(self, obj):
        """Convert *obj* to a JSON-serializable value or defer to the base.

        Args:
            obj: object the standard encoder could not serialize.

        Returns:
            int, float or list for NumPy types; otherwise delegates to
            ``json.JSONEncoder.default`` (which raises TypeError).
        """
        # np.int64 is a subclass of np.integer, so the original extra
        # ``isinstance(obj, np.int64)`` check was redundant.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Return *variable* parsed as an int when possible, else unchanged.

    Args:
        variable (string): a string holding either an integer or text.

    Returns:
        int or str: the parsed integer, or the original value when it
        does not parse as an integer.
    """
    try:
        converted = int(variable)
    except ValueError:
        # Not an integer literal: hand the value back untouched.
        return variable
    return converted
def to_str(variable):
    """Return *variable* as a string when it is int-parseable, else unchanged.

    Args:
        variable: a value holding either something int() accepts or text.

    Returns:
        str or original type: ``str(variable)`` when ``int(variable)``
        succeeds, otherwise *variable* itself.
    """
    try:
        int(variable)
    except ValueError:
        # Non-numeric text stays as-is.
        return variable
    return str(variable)
def check_version_2(dataset):
    """Tell whether a dataset declares JSON-stat version >= 2.0.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().

    Returns:
        bool: True when a 'version' attribute exists and is >= 2.0;
        False otherwise (including datasets without the attribute).
    """
    version = dataset.get('version')
    if not version:
        # Missing (or empty) version attribute: treat as pre-2.0.
        return False
    return float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Recursively flatten a JSON-stat collection into DataFrames.

    Every dataset found, at any nesting depth, is converted via
    ``Dataset.read(...).write('dataframe')`` and appended to *df_list*.

    Args:
        collection (OrderedDict): deserialized JSON-stat collection.
        df_list (list): accumulator receiving the converted datasets.

    Returns:
        None: results are accumulated in *df_list*.
    """
    for item in collection['link']['item']:
        item_class = item['class']
        if item_class == 'dataset':
            df_list.append(Dataset.read(item['href']).write('dataframe'))
        elif item_class == 'collection':
            # Fetch the nested collection and recurse into it.
            unnest_collection(request(item['href']), df_list)
def check_input(naming):
    """Validate the *naming* parameter used across the module.

    Args:
        naming (string): naming type; must be 'label' or 'id'.

    Returns:
        None

    Raises:
        ValueError: if *naming* is not an allowed value.
    """
    if naming not in ('label', 'id'):
        raise ValueError('naming must be "label" or "id"')
def get_dim_label(js_dict, dim, input="dataset"):
    """Get a label DataFrame for a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from the JSON file.
        input (string): 'dataset' when *js_dict* is a whole dataset,
            'dimension' when it is a single dimension object.
            NOTE: the name shadows the builtin but is kept for
            backward compatibility.

    Returns:
        dim_label (pandas.DataFrame): label-based dimension data with
        columns 'id', the label column and 'index', sorted by index.

    Raises:
        ValueError: if *input* is neither 'dataset' nor 'dimension'.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError

    try:
        dim_label = input['category']['label']
    except KeyError:
        # No explicit labels: fall back to using the category ids as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index entry.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # 'index' may be a list of ids or an id -> position mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # BUG FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values(by=...) is the supported equivalent.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get an index DataFrame for a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from the JSON file.

    Returns:
        dim_index (pandas.DataFrame): index-based dimension data with
        columns 'id' and 'index', sorted by index.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No index entry: synthesize a single-row index from the first label.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # 'index' may be a list of ids or an id -> position mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # BUG FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values(by=...) is the supported equivalent.
    dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get the flat list of dataset values.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value element. Defaults to
            'value'.

    Returns:
        values (list): dense list of dataset values. When the source uses
        the sparse dict representation, missing entries are ``None``.
    """
    values = js_dict[value]
    if type(values) is list:
        # BUG FIX: the original guard read
        # ``if type(values[0]) is not dict or tuple`` which, due to
        # operator precedence (``... or tuple``), was always true -- so
        # any list was returned as-is, and it raised IndexError on an
        # empty list. The de-facto behavior (return lists untouched) is
        # kept, spelled out explicitly and safe for empty input.
        return values
    # Sparse representation: a dict mapping flat value indices to values.
    values = {int(key): val for (key, val) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    # Pre-size the dense list; unset positions stay None.
    vals = max_val * [None]
    for (key, val) in values.items():
        vals[key] = val
    values = vals
    return values
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Yields every combination of category values across *dimensions*
    (cartesian product, in dimension order) by recursing one dimension
    at a time and mutating a single shared *record* list.

    Args:
        dimensions (list): pandas dataframes with dimension data as
            produced by get_dim_label or get_dim_index.
        naming (string, optional): dimension naming; 'label' or 'id'.
        i (int): current dimension index; 0 on the external call, used
            internally by the recursion.
        record (list): partial row being built; None on the external
            call, shared (and mutated) across all recursive calls.

    Yields:
        list: row values for every non-value column. NOTE: the SAME list
        object is yielded each time and mutated afterwards -- callers
        must consume/copy it immediately (generate_df does).
    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        # A full-length record means every dimension has contributed.
        if len(record) == len(dimensions):
            yield record
        # Recurse into the next dimension while this category is pushed.
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        # Backtrack: pop this level's category before the next iteration.
        if len(record) == i + 1:
            record.pop()
def uniquify(seq):
    """Return the unique values of *seq*, preserving first-seen order.

    Args:
        seq (list): original list.

    Returns:
        list: *seq* without duplicates, in original order.
    """
    seen = set()
    result = []
    for element in seq:
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
def generate_df(js_dict, naming, value="value"):
    """Decode a JSON-stat dict into a pandas.DataFrame. Helper meant to
    be called from from_json_stat().

    Args:
        js_dict (OrderedDict): data in JSON-stat format, previously
            deserialized into a python object by json.load()/json.loads().
        naming (string): dimension naming; 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        output (pandas.DataFrame): one column per dimension plus the
        value column, one row per value.
    """
    values = []  # NOTE(review): dead initialization, overwritten below.
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Each generated category row is paired positionally with its value;
    # the list concatenation copies the shared record yielded by
    # get_df_row before it is mutated again.
    output = pd.DataFrame([category + [values[i]]
                           for i, category in
                           enumerate(get_df_row(dimensions, naming))])
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame objects.

    Deprecated: use the read() methods of Dataset, Collection or
    Dimension instead.

    Args:
        datasets (OrderedDict, dict, Dataset or list): data in JSON-stat
            format, previously deserialized to a python object by
            json.load()/json.loads().
        naming (string, optional): dimension naming; 'label' (default)
            or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        results (list): pandas.DataFrame objects with the imported data.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # A list of JSON-stat 1.x bundles: each element maps dataset
        # names to dataset dicts.
        for idx, element in enumerate(datasets):
            for dataset in element:
                js_dict = datasets[idx][dataset]
                results.append(generate_df(js_dict, naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # JSON-stat 2.0 object: only the 'dataset' class is handled.
            if datasets['class'] == 'dataset':
                js_dict = datasets
                results.append(generate_df(js_dict, naming, value))
        else:  # 1.00 bundle type
            for dataset in datasets:
                js_dict = datasets[dataset]
                results.append(generate_df(js_dict, naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object(s) into JSON-stat format.

    Deprecated: use the write() methods of Dataset, Collection or
    Dimension instead. Each DataFrame must have exactly one value column;
    every other column is treated as a dimension and the combination must
    form a unique ID per row.

    Args:
        input_df (pandas.DataFrame or list): data frame(s) to encode.
        value (string, optional): name of the value column. Defaults to
            'value'.
        output (string): 'list' produces a list of dataset dicts, 'dict'
            a single merged dict.
        version (string): target JSON-stat version; '2.0' or the legacy
            default '1.3' (kept for backwards compatibility).

    Returns:
        output (string): JSON string with the encoded JSON-stat object.

    Raises:
        ValueError: if the non-value columns do not constitute a unique ID.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Dimension columns are all columns except the value column.
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item not in value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One category object per dimension column: id -> position index
        # and id -> label mappings, in first-seen order.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            # FIX: the original updated "dimension" with the same
            # categories twice; a single pass yields an identical result.
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            # FIX: duplicated category-update loop removed here as well.
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            # NOTE(review): an unknown 'output' silently serializes to
            # "null"; kept for backward compatibility.
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a GET request to *path* accepting JSON and return the \
    deserialized Python object.

    Args:
        path (str): the URI to be requested.

    Returns:
        response: deserialized JSON Python object.

    Raises:
        requests.exceptions.HTTPError: HTTP error returned by the server.
        requests.exceptions.InvalidURL: an invalid URL was requested.
        Exception: any other failure (logged, then re-raised).
    """
    headers = {'Accept': 'application/json'}
    try:
        requested_object = requests.get(path, headers=headers)
        # Turn 4xx/5xx responses into HTTPError exceptions.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # inspect.stack()[0][3] is this function's name, for log context.
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        # Catch-all: log the full traceback, then propagate.
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        response = requested_object.json()
        return response
class Dataset(OrderedDict):
    """A class representing a JSON-stat dataset (an ordered mapping)."""

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Build a Dataset from a URL, DataFrame, JSON string, file
        object or OrderedDict.

        Args:
            data: pandas DataFrame, JSON string, OrderedDict, URL string,
                or a readable file-like object containing JSON.

        Returns:
            Dataset: populated with the parsed data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through to_json_stat to build a 2.0 dataset dict.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # Plain JSON string.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Assume a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Serialize this Dataset to a JSON-stat string or a DataFrame.

        Args:
            output (string): 'jsonstat' (default) or 'dataframe'.

        Returns:
            str or pandas.DataFrame, depending on *output*.

        Raises:
            ValueError: if *output* is not an allowed value.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # from_json_stat returns a list; a single Dataset gives one frame.
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Convert a dimension ID and a category ID into the numeric \
        index of that category within that dimension.

        Args:
            name (string): ID string of the dimension.
            value (string): ID string of the category.

        Returns:
            int: index of the category in the dimension (0 when the
            dimension has no explicit 'index' entry).
        """
        # Dimensions with a single category may omit 'index' entirely.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        # 'index' may be a list of ids or an id -> position mapping.
        if isinstance(ndx, list):
            return ndx.index(value)
        else:
            return ndx[value]

    def get_dimension_indices(self, query):
        """Convert a dimension/category list of dicts into a list of \
        dimension indices.

        Args:
            query (list): list of single-entry dicts mapping a dimension
                ID to the desired category ID.

        Returns:
            indices (list): numeric category index per dimension, in the
            dataset's dimension order.
        """
        # Dimension ID order: JSON-stat 2.0 uses top-level 'id',
        # 1.x uses 'dimension'/'id'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the query entry addressing this dimension; assumes
            # exactly one exists (IndexError otherwise).
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Convert a list of dimension indices into a flat value index.

        Implements the JSON-stat row-major layout (last dimension varies
        fastest in the flat 'value' array).

        Args:
            indices (list): numeric category index per dimension.

        Returns:
            num (int): position within the flat 'value' array.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Walk dimensions from last to first, accumulating the stride.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Return the data value stored at a flat value index.

        Args:
            index (int): numeric value index into the 'value' array.

        Returns:
            The data value at that position.
        """
        return self['value'][index]

    def get_value(self, query):
        """Look up a single data value from a dimension/category query.

        Composes: query -> per-dimension indices -> flat index -> value.

        Args:
            query (list): list of dicts mapping dimension IDs to the
                desired category IDs.

        Returns:
            value: the matching data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSON-stat dimension (an ordered mapping)."""

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Build a Dimension from a DataFrame, URL, JSON string, file
        object or OrderedDict.

        Args:
            data: pandas DataFrame (columns: 'id', optional 'index' and
                one label column), OrderedDict, URL string, JSON string,
                or a readable file-like object containing JSON.

        Returns:
            Dimension: populated with the parsed data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension object from the frame.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The single non-id/non-index column is the label column.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            # URL: fetch and deserialize via the module-level helper.
            return cls(request(data))
        elif isinstance(data,basestring):
            # Plain JSON string.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Assume a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Serialize this Dimension to JSON-stat or a pandas DataFrame.

        Args:
            output (string): 'jsonstat' (default) or 'dataframe'.

        Returns:
            str or pandas.DataFrame, depending on *output*.

        Raises:
            ValueError: if *output* is not an allowed value.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # 'dimension' mode tells get_dim_label that *self* is a single
            # dimension object rather than a whole dataset.
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSON-stat collection (an ordered mapping)."""

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Build a Collection from a URL, JSON string, file object or
        OrderedDict.

        Args:
            data: OrderedDict, URL string, JSON string, or a readable
                file-like object containing JSON.

        Returns:
            Collection: populated with the parsed data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://", "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Assume a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Serialize this Collection to JSON-stat or a list of DataFrames.

        Args:
            output (string): 'jsonstat' (default) or 'dataframe_list'.

        Returns:
            str or list of pandas.DataFrame, depending on *output*.

        Raises:
            ValueError: if *output* is not an allowed value.
        """
        if output == 'jsonstat':
            # NOTE(review): unlike Dataset/Dimension.write, this does not
            # pass cls=NumpyEncoder -- confirm whether numpy values can
            # appear here.
            return json.dumps(self)
        elif output == 'dataframe_list':
            df_list = []
            # Recursively converts every nested dataset into a DataFrame.
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Fetch the *element*-th item of the collection as an object of
        the corresponding class.

        NOTE(review): this overrides ``dict.get`` with an incompatible
        signature (positional index instead of key/default).

        Args:
            element (int): index into the collection's link/item list.

        Returns:
            Dataset, Collection or Dimension fetched from the item's href.

        Raises:
            ValueError: if the item's class is not a supported kind.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | get_dim_label | python | def get_dim_label(js_dict, dim, input="dataset"):
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label | Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L198-L251 | [
"def get_dim_index(js_dict, dim):\n \"\"\"Get index from a given dimension.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\n Returns:\n dim_index (pandas.DataFrame): DataFrame with index-based dimension data.\n\n \"\"\"\n\n try:\n dim_index = js_dict['dimension'][dim]['category']['index']\n except KeyError:\n dim_label = get_dim_label(js_dict, dim)\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),\n index=[0],\n columns=['id', 'index'])\n else:\n if type(dim_index) is list:\n dim_index = pd.DataFrame(list(zip(dim_index,\n range(0, len(dim_index)))),\n index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(),\n dim_index.values())),\n index=dim_index.keys(),\n columns=['id', 'index'])\n dim_index = dim_index.sort_index(by='index')\n return dim_index\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired by rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
# NOTE(review): configuring logging at import time is a side effect on the
# embedding application's root logger — confirm this is intended.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)

# Python 2/3 compatibility shim: Python 3 has no basestring, so the
# isinstance(..., basestring) checks in the classes below fall back to str.
try:
    basestring
except NameError:
    basestring = str
class NumpyEncoder(json.JSONEncoder):
    """Custom JSON encoder that serializes Numpy data types.

    Numpy integers become Python ints, floats become Python floats and
    arrays become (nested) lists; anything else is delegated to the base
    class, which raises TypeError for unsupported types.
    """

    def default(self, obj):
        # np.int64 is a subclass of np.integer, so one check covers every
        # integer width (the original tested np.int64 redundantly).
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Return ``variable`` converted to int when it parses as an integer.

    Args:
        variable: a value (usually a string) that may represent an integer.

    Returns:
        int when the value parses as an integer, otherwise the original
        value unchanged.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
def to_str(variable):
    """Return ``variable`` as a string when it represents an integer.

    Args:
        variable: a value (usually a string) that may represent an integer.

    Returns:
        str(variable) when the value parses as an integer, otherwise the
        original value unchanged.
    """
    try:
        int(variable)
    except ValueError:
        return variable
    return str(variable)
def check_version_2(dataset):
    """Tell whether a dataset declares JSON-stat version 2.0 or later.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().

    Returns:
        bool: True when a 'version' attribute exists and is >= 2.0; False
        otherwise (datasets without the attribute always yield False).
    """
    version = dataset.get('version')
    return bool(version) and float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Recursively flatten a JSON-stat collection into pandas dataframes.

    Every dataset found (at any nesting level) is converted and appended to
    ``df_list`` in document order.

    Args:
        collection (OrderedDict): deserialized JSON-stat collection.
        df_list (list): accumulator that receives the converted datasets.

    Returns:
        Nothing; ``df_list`` is mutated in place.
    """
    for entry in collection['link']['item']:
        kind = entry['class']
        if kind == 'dataset':
            df_list.append(Dataset.read(entry['href']).write('dataframe'))
        elif kind == 'collection':
            # Nested collection: fetch it and recurse.
            unnest_collection(request(entry['href']), df_list)
def check_input(naming):
    """Validate the naming parameter.

    Args:
        naming (string): naming type; must be 'label' or 'id'.

    Raises:
        ValueError: if naming is not one of the allowed values.
    """
    allowed = ('label', 'id')
    if naming in allowed:
        return
    raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Extract dimension category tables and names from a JSON-stat dict.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming, either 'label' or 'id'.

    Returns:
        tuple: (dimensions, dim_names) where dimensions is a list of pandas
        dataframes with category data and dim_names a list of strings.
    """
    dimensions = []
    dim_names = []
    # JSON-stat 2.0 keeps the dimension id list at the top level; older
    # versions nest it under 'dimension'.
    root = js_dict if check_version_2(js_dict) else js_dict['dimension']
    for dim in root['id']:
        # Fall back to the dimension id when the label is empty.
        dim_name = js_dict['dimension'][dim]['label'] or dim
        if naming == 'label':
            dimensions.append(get_dim_label(js_dict, dim))
            dim_names.append(dim_name)
        else:
            dimensions.append(get_dim_index(js_dict, dim))
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
            data ('id' and 'index' columns), sorted by 'index'.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No explicit index: single-category dimension, take the only id
        # from the label at position 0.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # JSON-stat allows the index to be a list of ids or an id->position
        # mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # sort_index(by=...) was removed from pandas; sort_values is the
        # supported spelling.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        values (list): list of dataset values; sparse (dict) inputs are
            expanded to a dense list with None in the missing positions.
    """
    values = js_dict[value]
    if type(values) is list:
        # The original condition "type(values[0]) is not dict or tuple" was
        # always true because of operator precedence; the intent (see the
        # comment below) is: a plain list of scalar values is returned
        # unchanged.  Empty lists are returned as-is instead of crashing on
        # values[0].
        if not values or not isinstance(values[0], (dict, tuple)):
            return values
    # being not a list of dicts or tuples leaves us with a dict...
    values = {int(key): value for (key, value) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    # int() keeps this a plain list repetition instead of a numpy broadcast
    # (np.prod returns a numpy scalar).
    vals = int(max_val) * [None]
    for (key, value) in values.items():
        vals[key] = value
    values = vals
    return values
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels
            generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used in
            the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row,
            except for the value column. Default is empty, it's used in the
            recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value
        column.
    """
    check_input(naming)
    # One list is shared across the whole recursion: each level appends its
    # current category, recurses into the next dimension, and pops it again
    # on the way out, so `record` always holds the path from dimension 0
    # down to dimension i.
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        if len(record) == len(dimensions):
            # A category has been chosen for every dimension: emit the row.
            # NOTE(review): the same list object is yielded each time, so
            # consumers must copy it before the next iteration mutates it.
            yield record
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Undo this level's append before trying the next category.
            record.pop()
def uniquify(seq):
    """Return the elements of seq without duplicates, keeping first-seen
    order. See: http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    result = []
    for element in seq:
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
def generate_df(js_dict, naming, value="value"):
    """Decode a JSON-stat dict into a pandas.DataFrame.

    Helper method that should be called inside from_json_stat().

    Args:
        js_dict (OrderedDict): JSON-stat data deserialized into a python
            object by json.load() or json.loads(), for example.
        naming (string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        pandas.DataFrame: converted data.
    """
    dimensions, dim_names = get_dimensions(js_dict, naming)
    cell_values = get_values(js_dict, value=value)
    # One row per combination of dimension categories, in value order.
    rows = [row + [cell_values[pos]]
            for pos, row in enumerate(get_df_row(dimensions, naming))]
    frame = pd.DataFrame(rows)
    frame.columns = dim_names + [value]
    frame.index = range(0, len(cell_values))
    return frame
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame object.

    Args:
        datasets (OrderedDict, list): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads(),
            for example. Both list and OrderedDict are accepted as inputs.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'. Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        results (list): list of pandas.DataFrame with imported data.
    """
    # Kept for backwards compatibility; the Dataset/Collection/Dimension
    # classes are the supported API now.
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # List of bundles: convert every dataset inside every element.
        for idx, element in enumerate(datasets):
            for dataset in element:
                js_dict = datasets[idx][dataset]
                results.append(generate_df(js_dict, naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # JSON-stat 2.0 marks a single dataset with class == 'dataset'.
            if datasets['class'] == 'dataset':
                js_dict = datasets
                results.append(generate_df(js_dict, naming, value))
        else:  # 1.00 bundle type
            for dataset in datasets:
                js_dict = datasets[dataset]
                results.append(generate_df(js_dict, naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df (pandas.DataFrame): pandas data frame (or list of data
            frames) to encode.
        value (string, optional): name of the value column. Defaults to
            'value'.
        output (string): accepts two values: 'list' or 'dict'. Produce list
            of dicts or dict of dicts as output.
        version (string): desired json-stat version. 2.0 is preferred now.
            Apart from this, only older 1.3 format is accepted, which is the
            default parameter in order to preserve backwards compatibility.

    Returns:
        output (string): String with JSON-stat object.

    Raises:
        ValueError: if the non-value columns do not constitute a unique ID.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Every column except the value column is a dimension.  The original
        # test was "item not in value" — a substring match that silently
        # dropped any column whose name was contained in the value column's
        # name (e.g. a column 'al' when value is 'value').
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item not in [value]])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One category object per dimension: index and label maps keyed by
        # the stringified category ids in first-appearance order.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        # NOTE(review): np.isnan assumes the value column is numeric —
        # confirm callers never pass object/string values.
        if float(version) >= 2.0:
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            # The original ran this update loop twice; dict.update with the
            # same data is idempotent, so once is enough.
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            # Categories are inserted first so that the serialized
            # "dimension" object keeps its key order (categories, id, size),
            # matching the previous output byte-for-byte.
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    headers = {'Accept': 'application/json'}
    try:
        requested_object = requests.get(path, headers=headers)
        # Turn 4xx/5xx responses into HTTPError so they are logged below.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # inspect.stack()[0][3] is the name of this function ('request').
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        response = requested_object.json()
        return response
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    Behaves as an OrderedDict holding the raw JSONstat structure, with
    helper methods to read/write it and to navigate dimensions and values.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Dataframes are converted through to_json_stat (2.0 format)
            # and parsed back, preserving key order.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string is assumed to be serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: a readable file-like object containing JSON.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'. Default
                to 'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.

        Raises:
            ValueError: if output is not one of the allowed values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name (string): ID string of the dimension.
            value (string): ID string of the category.

        Returns:
            ndx[value] (int): index of the category in the dimension.
        """
        # Dimensions without an explicit index have a single category at
        # position 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        # JSON-stat allows the index to be a list of category ids or a
        # mapping from category id to position.
        if isinstance(ndx, list):
            return ndx.index(value)
        else:
            return ndx[value]

    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query (list): dimension/category list of dicts.

        Returns:
            indices (list): list of dimensions' indices.
        """
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # For each dimension id, find the category requested in query
            # and translate it to its numeric position.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Converts a list of dimensions' indices into a numeric value index.

        Args:
            indices (list): list of dimension's indices.

        Returns:
            num (int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Row-major linearization: walk the dimensions from last to first,
        # scaling each index by the product of the trailing dimension sizes.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index (int): numeric value index.

        Returns:
            self['value'][index] (float): Numeric data value.
        """
        return self['value'][index]

    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value
        in three steps.

        Args:
            query (list): list of dicts with the desired query.

        Returns:
            value (float): numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension object from a dataframe with
            # an 'id' column plus one label column.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The label column is whichever column is not 'id' or 'index';
            # the destructuring assert-style unpack requires exactly one.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string is assumed to be serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: a readable file-like object containing JSON.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.

        Raises:
            ValueError: if output is not one of the allowed values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://",
                                     "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string is assumed to be serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: a readable file-like object containing JSON.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of
        Pandas Dataframes.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe_list'.

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending on
            the 'output' parameter.

        Raises:
            ValueError: if output is not one of the allowed values.
        """
        if output == 'jsonstat':
            return json.dumps(self)
        elif output == 'dataframe_list':
            # Collections can nest; unnest_collection walks them
            # recursively, appending one dataframe per dataset found.
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Gets the element-th item of a collection as an object of the
        corresponding class (Dataset, Collection or Dimension), read from
        the item's 'href' link.

        NOTE(review): this overrides dict.get() with an incompatible
        signature (positional index instead of key/default) — confirm no
        caller relies on the mapping semantics.

        Args:
            element (int): position of the item inside the collection.

        Returns:
            A Dataset, Collection or Dimension object.

        Raises:
            ValueError: if the item class is not one of the allowed ones.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | get_dim_index | python | def get_dim_index(js_dict, dim):
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_index = dim_index.sort_index(by='index')
return dim_index | Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L254-L284 | [
"def get_dim_label(js_dict, dim, input=\"dataset\"):\n \"\"\"Get label from a given dimension.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n dim (string): dimension name obtained from JSON file.\n\n Returns:\n dim_label(pandas.DataFrame): DataFrame with label-based dimension data.\n\n \"\"\"\n\n if input == 'dataset':\n input = js_dict['dimension'][dim]\n label_col = 'label'\n elif input == 'dimension':\n label_col = js_dict['label']\n input = js_dict\n else:\n raise ValueError\n\n try:\n dim_label = input['category']['label']\n\n except KeyError:\n dim_index = get_dim_index(js_dict, dim)\n dim_label = pd.concat([dim_index['id'],\n dim_index['id']],\n axis=1)\n dim_label.columns = ['id', 'label']\n else:\n dim_label = pd.DataFrame(list(zip(dim_label.keys(),\n dim_label.values())),\n index=dim_label.keys(),\n columns=['id', label_col])\n # index must be added to dim label so that it can be sorted\n try:\n dim_index = input['category']['index']\n except KeyError:\n dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),\n index=[0],\n columns=['id', 'index'])\n else:\n if type(dim_index) is list:\n dim_index = pd.DataFrame(list(zip(dim_index,\n range(0, len(dim_index)))),\n index=dim_index, columns=['id', 'index'])\n else:\n dim_index = pd.DataFrame(list(zip(dim_index.keys(),\n dim_index.values())),\n index=dim_index.keys(),\n columns=['id', 'index'])\n dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')\n return dim_label\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired by rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
# NOTE(review): configuring logging at import time is a side effect on the
# embedding application's root logger — confirm this is intended.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)

# Python 2/3 compatibility shim: Python 3 has no basestring, so the
# isinstance(..., basestring) checks in the classes below fall back to str.
try:
    basestring
except NameError:
    basestring = str
class NumpyEncoder(json.JSONEncoder):
    """Custom JSON encoder that serializes Numpy data types.

    Numpy integers become Python ints, floats become Python floats and
    arrays become (nested) lists; anything else is delegated to the base
    class, which raises TypeError for unsupported types.
    """

    def default(self, obj):
        # np.int64 is a subclass of np.integer, so one check covers every
        # integer width (the original tested np.int64 redundantly).
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Return ``variable`` converted to int when it parses as an integer.

    Args:
        variable: a value (usually a string) that may represent an integer.

    Returns:
        int when the value parses as an integer, otherwise the original
        value unchanged.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
def to_str(variable):
    """Return ``variable`` as a string when it represents an integer.

    Args:
        variable: a value (usually a string) that may represent an integer.

    Returns:
        str(variable) when the value parses as an integer, otherwise the
        original value unchanged.
    """
    try:
        int(variable)
    except ValueError:
        return variable
    return str(variable)
def check_version_2(dataset):
    """Tell whether a dataset declares JSON-stat version 2.0 or later.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().

    Returns:
        bool: True when a 'version' attribute exists and is >= 2.0; False
        otherwise (datasets without the attribute always yield False).
    """
    version = dataset.get('version')
    return bool(version) and float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Recursively flatten a JSON-stat collection into pandas dataframes.

    Every dataset found (at any nesting level) is converted and appended to
    ``df_list`` in document order.

    Args:
        collection (OrderedDict): deserialized JSON-stat collection.
        df_list (list): accumulator that receives the converted datasets.

    Returns:
        Nothing; ``df_list`` is mutated in place.
    """
    for entry in collection['link']['item']:
        kind = entry['class']
        if kind == 'dataset':
            df_list.append(Dataset.read(entry['href']).write('dataframe'))
        elif kind == 'collection':
            # Nested collection: fetch it and recurse.
            unnest_collection(request(entry['href']), df_list)
def check_input(naming):
    """Validate the naming parameter.

    Args:
        naming (string): naming type; must be 'label' or 'id'.

    Raises:
        ValueError: if naming is not one of the allowed values.
    """
    allowed = ('label', 'id')
    if naming in allowed:
        return
    raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Extract dimension category tables and names from a JSON-stat dict.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming, either 'label' or 'id'.

    Returns:
        tuple: (dimensions, dim_names) where dimensions is a list of pandas
        dataframes with category data and dim_names a list of strings.
    """
    dimensions = []
    dim_names = []
    # JSON-stat 2.0 keeps the dimension id list at the top level; older
    # versions nest it under 'dimension'.
    root = js_dict if check_version_2(js_dict) else js_dict['dimension']
    for dim in root['id']:
        # Fall back to the dimension id when the label is empty.
        dim_name = js_dict['dimension'][dim]['label'] or dim
        if naming == 'label':
            dimensions.append(get_dim_label(js_dict, dim))
            dim_names.append(dim_name)
        else:
            dimensions.append(get_dim_index(js_dict, dim))
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string, optional): 'dataset' when js_dict is a whole
            JSON-stat dataset, 'dimension' when it is a single dimension
            object. Defaults to 'dataset'.

    Returns:
        dim_label (pandas.DataFrame): DataFrame with label-based dimension
            data ('id' plus label column), sorted by category index.

    Raises:
        ValueError: if input is neither 'dataset' nor 'dimension'.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError

    try:
        dim_label = input['category']['label']
    except KeyError:
        # No labels provided: reuse the category ids as their own labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # sort_index(by=...) was removed from pandas; sort_values is the
    # supported spelling.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        values (list): list of dataset values; sparse (dict) inputs are
            expanded to a dense list with None in the missing positions.
    """
    values = js_dict[value]
    if type(values) is list:
        # The original condition "type(values[0]) is not dict or tuple" was
        # always true because of operator precedence; the intent (see the
        # comment below) is: a plain list of scalar values is returned
        # unchanged.  Empty lists are returned as-is instead of crashing on
        # values[0].
        if not values or not isinstance(values[0], (dict, tuple)):
            return values
    # being not a list of dicts or tuples leaves us with a dict...
    values = {int(key): value for (key, value) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    # int() keeps this a plain list repetition instead of a numpy broadcast
    # (np.prod returns a numpy scalar).
    vals = int(max_val) * [None]
    for (key, value) in values.items():
        vals[key] = value
    values = vals
    return values
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels
            generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used in
            the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row,
            except for the value column. Default is empty, it's used in the
            recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value
        column.
    """
    check_input(naming)
    # One list is shared across the whole recursion: each level appends its
    # current category, recurses into the next dimension, and pops it again
    # on the way out, so `record` always holds the path from dimension 0
    # down to dimension i.
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        if len(record) == len(dimensions):
            # A category has been chosen for every dimension: emit the row.
            # NOTE(review): the same list object is yielded each time, so
            # consumers must copy it before the next iteration mutates it.
            yield record
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Undo this level's append before trying the next category.
            record.pop()
def uniquify(seq):
    """Return the elements of seq without duplicates, keeping first-seen
    order. See: http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    result = []
    for element in seq:
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
def generate_df(js_dict, naming, value="value"):
    """Decode a JSON-stat dict into a pandas.DataFrame.

    Helper method that should be called inside from_json_stat().

    Args:
        js_dict (OrderedDict): JSON-stat data deserialized into a python
            object by json.load() or json.loads(), for example.
        naming (string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        pandas.DataFrame: converted data.
    """
    dimensions, dim_names = get_dimensions(js_dict, naming)
    cell_values = get_values(js_dict, value=value)
    # One row per combination of dimension categories, in value order.
    rows = [row + [cell_values[pos]]
            for pos, row in enumerate(get_df_row(dimensions, naming))]
    frame = pd.DataFrame(rows)
    frame.columns = dim_names + [value]
    frame.index = range(0, len(cell_values))
    return frame
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame objects.

    Deprecated: prefer the read() methods of Dataset, Collection or
    Dimension.

    Args:
        datasets (OrderedDict, list): data in JSON-stat format, previously
            deserialized by json.load()/json.loads(). Both a list of
            bundles and a single mapping are accepted.
        naming (string, optional): dimension naming, 'label' or 'id'.
            Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        list: list of pandas.DataFrame with the imported data.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # A list of 1.x bundles: every key of every element is a dataset.
        for element in datasets:
            for name in element:
                results.append(generate_df(element[name], naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # A single 2.0-style dataset; other classes yield no result.
            if datasets['class'] == 'dataset':
                results.append(generate_df(datasets, naming, value))
        else:
            # 1.00 bundle type: top-level keys map to datasets.
            for name in datasets:
                results.append(generate_df(datasets[name], naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object(s) into JSON-stat format.

    The DataFrames must have exactly one value column; every other column
    is treated as a dimension and must form a unique ID per row.

    Deprecated: prefer the write() methods of Dataset, Collection or
    Dimension.

    Args:
        input_df (pandas.DataFrame, list): data frame (or list of data
            frames) to encode.
        value (string, optional): name of the value column. Defaults to
            'value'.
        output (string): 'list' or 'dict'; produce a list of dicts or a
            dict of dicts as output.
        version (string): desired JSON-stat version. 2.0 is preferred now;
            only the older 1.3 format is otherwise accepted, which is the
            default to preserve backwards compatibility.

    Returns:
        string: String with the JSON-stat object ("null" when 'output' is
        neither 'list' nor 'dict').
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    # Fallback so an invalid 'output' serializes to "null" even when the
    # input is empty (the original left 'result' unbound in that case).
    result = None
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # All columns except the value column form the dimension ID.
        # BUG FIX: the original used "item not in value", which performs a
        # substring test on the column name; "!=" is the intended exact
        # comparison.
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category:{index, label}}} mapping per dimension;
        # category order follows first appearance in the column.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: id/size live at the top level of the dataset.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: everything nested under "datasetN.dimension".
            dataset = {"dataset" + str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a GET request to *path* accepting JSON and return the
    deserialized response body.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    try:
        reply = requests.get(path, headers={'Accept': 'application/json'})
        # Turn 4xx/5xx status codes into HTTPError.
        reply.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    return reply.json()
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    The dataset is stored as-is: every JSON-stat key ('dimension', 'value',
    'id', 'size', ...) is an entry of this OrderedDict.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file (readable
                  file-like object), a JSON string, an OrderedDict or a URL
                  pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat 2.0 encoder so the mapping
            # layout matches what the other constructors produce.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # A plain string is assumed to hold serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Write data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'. Defaults
                             to 'jsonstat'.

        Returns:
            Serialized JSONstat string or a Pandas Dataframe, depending on
            the 'output' parameter.

        Raises:
            ValueError: if 'output' is not one of the accepted values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Convert a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name (string): ID string of the dimension.
            value (string): ID string of the category.

        Returns:
            int: index of the category in the dimension; 0 when the
            dimension carries no explicit 'index'.
        """
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        if isinstance(ndx, list):
            # 'index' may be a plain list of category ids...
            return ndx.index(value)
        else:
            # ...or a mapping from category id to position.
            return ndx[value]

    def get_dimension_indices(self, query):
        """Convert a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query (list): dimension/category list of dicts, e.g.
                          [{'sex': 'M'}, {'year': '2015'}].

        Returns:
            indices (list): list of dimensions' indices, in dataset
            dimension order.
        """
        # JSON-stat >= 2.0 stores 'id' at the top level; 1.x nests it
        # under 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the category the query specifies for this dimension;
            # IndexError here means the query omitted the dimension.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Convert a list of dimensions' indices into a numeric value index.

        Args:
            indices (list): list of dimension's indices.

        Returns:
            num (int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Mixed-radix (row-major) conversion: the last dimension varies
        # fastest; walk the indices from last to first, scaling each by the
        # product of the sizes already consumed.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Convert a numeric value index into its data value.

        Args:
            index (int): numeric value index.

        Returns:
            Data value at that position of the 'value' array.
        """
        return self['value'][index]

    def get_value(self, query):
        """Convert a dimension/category list of dicts into a data value
        in three steps (category indices -> flat index -> value).

        Args:
            query (list): list of dicts with the desired query.

        Returns:
            value: numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    Stored as-is: 'label', 'category', etc. are entries of this OrderedDict.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe (with 'id', 'index' and exactly
                  one label column), a JSON string, a JSON file (readable
                  file-like object), an OrderedDict or a URL pointing to a
                  JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The single non-id/non-index column is the label column; this
            # unpacking raises if there is not exactly one such column.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # A plain string is assumed to hold serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Write data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'.

        Returns:
            Serialized JSONstat string or a Pandas Dataframe, depending on
            the 'output' parameter.

        Raises:
            ValueError: if 'output' is not one of the accepted values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    The collection's items live under self['link']['item'].
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, JSON string, file-like object or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://",
                                     "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # A plain string is assumed to hold serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Write data from a Collection object to JSONstat or a list of
        Pandas Dataframes.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe_list'.

        Returns:
            Serialized JSONstat string or a list of Pandas Dataframes,
            depending on the 'output' parameter.

        Raises:
            ValueError: if 'output' is not one of the accepted values.
        """
        if output == 'jsonstat':
            return json.dumps(self)
        elif output == 'dataframe_list':
            df_list = []
            # Datasets may be nested inside sub-collections;
            # unnest_collection walks them recursively.
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Get the item at position *element* of the collection, wrapped in
        an object of the corresponding class.

        NOTE(review): this overrides OrderedDict.get() with incompatible
        semantics (positional item lookup instead of key lookup with a
        default) — confirm no caller relies on the dict behavior.

        Args:
            element (int): position of the item in self['link']['item'].

        Returns:
            A Dataset, Collection or Dimension read from the item's href.

        Raises:
            ValueError: if the item's class is not one of the known kinds.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
def get_values(js_dict, value='value'):
    """Get the value array from a JSON-stat dataset dict.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        values (list): list of dataset values; sparse dict input is
        expanded to the dataset's full size with None for missing cells.
    """
    values = js_dict[value]
    if type(values) is list:
        # BUG FIX: the original test was "type(values[0]) is not dict or
        # tuple", which is always true ("or tuple" is a truthy constant)
        # and raised IndexError on an empty list. A plain list of scalars
        # is returned as-is; anything else falls through below.
        if not values or not isinstance(values[0], (dict, tuple)):
            return values
    # being not a list of dicts or tuples leaves us with a dict...
    values = {int(key): value for (key, value) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    # Dense output sized by the product of all dimension sizes.
    vals = max_val * [None]
    for (key, value) in values.items():
        vals[key] = value
    values = vals
    return values
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L287-L315 | null | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalar and array types.

    Numpy integers become int, numpy floats become float and numpy arrays
    become nested lists, so json.dumps can serialize them.
    """

    def default(self, obj):
        # np.int64 and friends are subclasses of np.integer, so a single
        # check covers every integer width (behavior unchanged).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Coerce *variable* to int when it parses as an integer; otherwise
    return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        int or string, depending on the content of variable.
    """
    try:
        result = int(variable)
    except ValueError:
        # Not an integer literal (e.g. "abc" or "5.5"): hand back the
        # original value untouched.
        return variable
    return result
def to_str(variable):
    """Coerce *variable* to str when it parses as a number; otherwise
    return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        string, stringified when int(variable) succeeds; the original
        value otherwise.
    """
    try:
        int(variable)
    except ValueError:
        # Already a non-numeric string: return it as-is.
        return variable
    return str(variable)
def check_version_2(dataset):
    """Check whether a dataset declares a JSON-stat version >= 2.0.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().

    Returns:
        bool: True if the 'version' attribute exists and is equal or
        greater than 2.0; False otherwise (including datasets without a
        version attribute).
    """
    version = dataset.get('version')
    if not version:
        return False
    return float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Recursively flatten a JSON-stat collection into Pandas Dataframes.

    Every dataset found (at any nesting depth) is converted with
    Dataset.read(...).write('dataframe') and appended to *df_list*.

    Args:
        collection (OrderedDict): data in JSON-stat format, previously
            deserialized to a python object by json.load() or json.loads().
        df_list (list): output list; converted datasets are appended here.

    Returns:
        Nothing (df_list is mutated in place).
    """
    for entry in collection['link']['item']:
        kind = entry['class']
        if kind == 'dataset':
            df_list.append(Dataset.read(entry['href']).write('dataframe'))
        elif kind == 'collection':
            # Fetch the nested collection and recurse into it.
            unnest_collection(request(entry['href']), df_list)
def check_input(naming):
    """Validate the *naming* parameter.

    Args:
        naming (string): naming type; must be 'label' or 'id'.

    Returns:
        Nothing.

    Raises:
        ValueError: if the parameter is not in the allowed list.
    """
    allowed = ('label', 'id')
    if naming not in allowed:
        raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Get dimensions from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming, either 'label' or 'id'.

    Returns:
        dimensions (list): list of pandas data frames with dimension
                           category data.
        dim_names (list): list of strings with dimension names.
    """
    # JSON-stat >= 2.0 keeps 'id' at the dataset's top level; 1.x nests it
    # under 'dimension'.
    root = js_dict if check_version_2(js_dict) else js_dict['dimension']
    dimensions = []
    dim_names = []
    for dim in root['id']:
        if naming == 'label':
            # Fall back to the dimension id when no label is provided.
            label = js_dict['dimension'][dim]['label'] or dim
            dimensions.append(get_dim_label(js_dict, dim))
            dim_names.append(label)
        else:
            dimensions.append(get_dim_index(js_dict, dim))
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string, optional): 'dataset' when js_dict is a whole
            dataset, 'dimension' when it is a single dimension dict.
            (Name shadows the builtin; kept for interface compatibility.)

    Returns:
        dim_label (pandas.DataFrame): DataFrame with id/label/index
        columns for the dimension, sorted by category index.

    Raises:
        ValueError: if 'input' is neither 'dataset' nor 'dimension'.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No labels provided: reuse the category ids as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # A list already encodes the order: enumerate it.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # FIX: DataFrame.sort_index(by=...) was removed from pandas; sorting
    # by a column is sort_values.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with 'id' and 'index'
        columns for the dimension, sorted by index.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index: derive the
        # lone id from the labels and give it position 0.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # A list already encodes the order: enumerate it.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # FIX: DataFrame.sort_index(by=...) was removed from pandas;
        # sorting by a column is sort_values.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Walks the cartesian product of all dimension categories in order,
    yielding one list of category values per dataframe row.

    NOTE: the same mutable *record* list is yielded every time; callers
    must copy it before the next iteration (generate_df does
    ``category + [value]``, which copies).

    Args:
        dimensions (list): list of pandas dataframes with dimension labels
            generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used
            in the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row,
            except for the value column. Default is empty, it's used in
            the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value
        column.
    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        if len(record) == len(dimensions):
            # One category chosen for every dimension: a complete row.
            yield record
        if i + 1 < len(dimensions):
            # Recurse to extend the partial row with the next dimension.
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Backtrack: drop this dimension's category before trying the
            # next one.
            record.pop()
def uniquify(seq):
    """Return the elements of *seq* with duplicates removed, keeping the
    first occurrence of each value in its original position.

    See http://www.peterbe.com/plog/uniqifiers-benchmark for background.

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def generate_df(js_dict, naming, value="value"):
    """Decode a JSON-stat dict into a pandas.DataFrame. Helper method meant
    to be called from from_json_stat().

    Args:
        js_dict (OrderedDict): data in JSON-stat format, previously
            deserialized into a python object by json.load()/json.loads().
        naming (string): dimension naming, either 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        DataFrame: converted data, one column per dimension plus the value
        column.
    """
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Each generated row carries the dimension categories; append the
    # matching value to complete it.
    rows = [categories + [values[idx]]
            for idx, categories in
            enumerate(get_df_row(dimensions, naming))]
    output = pd.DataFrame(rows)
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame objects.

    Deprecated: prefer the read() methods of Dataset, Collection or
    Dimension.

    Args:
        datasets (OrderedDict, list): data in JSON-stat format, previously
            deserialized by json.load()/json.loads(). Both a list of
            bundles and a single mapping are accepted.
        naming (string, optional): dimension naming, 'label' or 'id'.
            Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        list: list of pandas.DataFrame with the imported data.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # A list of 1.x bundles: every key of every element is a dataset.
        for element in datasets:
            for name in element:
                results.append(generate_df(element[name], naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # A single 2.0-style dataset; other classes yield no result.
            if datasets['class'] == 'dataset':
                results.append(generate_df(datasets, naming, value))
        else:
            # 1.00 bundle type: top-level keys map to datasets.
            for name in datasets:
                results.append(generate_df(datasets[name], naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object(s) into JSON-stat format.

    The DataFrames must have exactly one value column; every other column
    is treated as a dimension and must form a unique ID per row.

    Deprecated: prefer the write() methods of Dataset, Collection or
    Dimension.

    Args:
        input_df (pandas.DataFrame, list): data frame (or list of data
            frames) to encode.
        value (string, optional): name of the value column. Defaults to
            'value'.
        output (string): 'list' or 'dict'; produce a list of dicts or a
            dict of dicts as output.
        version (string): desired JSON-stat version. 2.0 is preferred now;
            only the older 1.3 format is otherwise accepted, which is the
            default to preserve backwards compatibility.

    Returns:
        string: String with the JSON-stat object ("null" when 'output' is
        neither 'list' nor 'dict').
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    # Fallback so an invalid 'output' serializes to "null" even when the
    # input is empty (the original left 'result' unbound in that case).
    result = None
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # All columns except the value column form the dimension ID.
        # BUG FIX: the original used "item not in value", which performs a
        # substring test on the column name; "!=" is the intended exact
        # comparison.
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category:{index, label}}} mapping per dimension;
        # category order follows first appearance in the column.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: id/size live at the top level of the dataset.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: everything nested under "datasetN.dimension".
            dataset = {"dataset" + str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a GET request to *path* accepting JSON and return the
    deserialized response body.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    try:
        reply = requests.get(path, headers={'Accept': 'application/json'})
        # Turn 4xx/5xx status codes into HTTPError.
        reply.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    return reply.json()
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    The dataset is stored as-is: every JSON-stat key ('dimension', 'value',
    'id', 'size', ...) is an entry of this OrderedDict.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file (readable
                  file-like object), a JSON string, an OrderedDict or a URL
                  pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat 2.0 encoder so the mapping
            # layout matches what the other constructors produce.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # A plain string is assumed to hold serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Write data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'. Defaults
                             to 'jsonstat'.

        Returns:
            Serialized JSONstat string or a Pandas Dataframe, depending on
            the 'output' parameter.

        Raises:
            ValueError: if 'output' is not one of the accepted values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Convert a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name (string): ID string of the dimension.
            value (string): ID string of the category.

        Returns:
            int: index of the category in the dimension; 0 when the
            dimension carries no explicit 'index'.
        """
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        if isinstance(ndx, list):
            # 'index' may be a plain list of category ids...
            return ndx.index(value)
        else:
            # ...or a mapping from category id to position.
            return ndx[value]

    def get_dimension_indices(self, query):
        """Convert a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query (list): dimension/category list of dicts, e.g.
                          [{'sex': 'M'}, {'year': '2015'}].

        Returns:
            indices (list): list of dimensions' indices, in dataset
            dimension order.
        """
        # JSON-stat >= 2.0 stores 'id' at the top level; 1.x nests it
        # under 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the category the query specifies for this dimension;
            # IndexError here means the query omitted the dimension.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Convert a list of dimensions' indices into a numeric value index.

        Args:
            indices (list): list of dimension's indices.

        Returns:
            num (int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Mixed-radix (row-major) conversion: the last dimension varies
        # fastest; walk the indices from last to first, scaling each by the
        # product of the sizes already consumed.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Convert a numeric value index into its data value.

        Args:
            index (int): numeric value index.

        Returns:
            Data value at that position of the 'value' array.
        """
        return self['value'][index]

    def get_value(self, query):
        """Convert a dimension/category list of dicts into a data value
        in three steps (category indices -> flat index -> value).

        Args:
            query (list): list of dicts with the desired query.

        Returns:
            value: numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    Stored as-is: 'label', 'category', etc. are entries of this OrderedDict.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe (with 'id', 'index' and exactly
                  one label column), a JSON string, a JSON file (readable
                  file-like object), an OrderedDict or a URL pointing to a
                  JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The single non-id/non-index column is the label column; this
            # unpacking raises if there is not exactly one such column.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # A plain string is assumed to hold serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Write data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output (string): can accept 'jsonstat' or 'dataframe'.

        Returns:
            Serialized JSONstat string or a Pandas Dataframe, depending on
            the 'output' parameter.

        Raises:
            ValueError: if 'output' is not one of the accepted values.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
"""
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
def get(self, element):
"""Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | get_df_row | python | def get_df_row(dimensions, naming='label', i=0, record=None):
check_input(naming)
if i == 0 or record is None:
record = []
for dimension in dimensions[i][naming]:
record.append(dimension)
if len(record) == len(dimensions):
yield record
if i + 1 < len(dimensions):
for row in get_df_row(dimensions, naming, i + 1, record):
yield row
if len(record) == i + 1:
record.pop() | Generate row dimension values for a pandas dataframe.
Args:
dimensions (list): list of pandas dataframes with dimension labels \
generated by get_dim_label or get_dim_index methods.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
i (int): dimension list iteration index. Default is 0, it's used in the \
recursive calls to the method.
record (list): list of values representing a pandas dataframe row, \
except for the value column. Default is empty, it's used \
in the recursive calls to the method.
Yields:
list: list with pandas dataframe column values except for value column | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L318-L349 | [
"def check_input(naming):\n \"\"\"Check and validate input params.\n\n Args:\n naming (string): a string containing the naming type (label or id).\n\n Returns:\n Nothing\n\n Raises:\n ValueError: if the parameter is not in the allowed list.\n\n \"\"\"\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')\n",
"def get_df_row(dimensions, naming='label', i=0, record=None):\n \"\"\"Generate row dimension values for a pandas dataframe.\n\n Args:\n dimensions (list): list of pandas dataframes with dimension labels \\\n generated by get_dim_label or get_dim_index methods.\n naming (string, optional): dimension naming. Possible values: 'label' \\\n or 'id'.\n i (int): dimension list iteration index. Default is 0, it's used in the \\\n recursive calls to the method.\n record (list): list of values representing a pandas dataframe row, \\\n except for the value column. Default is empty, it's used \\\n in the recursive calls to the method.\n\n Yields:\n list: list with pandas dataframe column values except for value column\n\n \"\"\"\n\n check_input(naming)\n if i == 0 or record is None:\n record = []\n for dimension in dimensions[i][naming]:\n record.append(dimension)\n if len(record) == len(dimensions):\n yield record\n\n if i + 1 < len(dimensions):\n for row in get_df_row(dimensions, naming, i + 1, record):\n yield row\n if len(record) == i + 1:\n record.pop()\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
"""Custom JSON encoder class for Numpy data types.
"""
def default(self, obj):
if isinstance(obj, np.integer) or isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
def to_int(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
return int(variable)
except ValueError:
return variable
def to_str(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
int(variable)
return str(variable)
except ValueError:
return variable
def check_version_2(dataset):
"""Checks if json-stat version attribute exists and is equal or greater \
than 2.0 for a given dataset.
Args:
dataset (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
Returns:
bool: True if version exists and is equal or greater than 2.0, \
False otherwise. For datasets without the version attribute, \
always return False.
"""
if float(dataset.get('version')) >= 2.0 \
if dataset.get('version') else False:
return True
else:
return False
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list)
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
"""Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data.
"""
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label
def get_dim_index(js_dict, dim):
"""Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
"""
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_index = dim_index.sort_index(by='index')
return dim_index
def get_values(js_dict, value='value'):
"""Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values.
"""
values = js_dict[value]
if type(values) is list:
if type(values[0]) is not dict or tuple:
return values
# being not a list of dicts or tuples leaves us with a dict...
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
max_val = np.prod(np.array((js_dict['size'])))
else:
max_val = np.prod(np.array((js_dict['dimension']['size'])))
vals = max_val * [None]
for (key, value) in values.items():
vals[key] = value
values = vals
return values
def uniquify(seq):
"""Return unique values in a list in the original order. See: \
http://www.peterbe.com/plog/uniqifiers-benchmark
Args:
seq (list): original list.
Returns:
list: list without duplicates preserving original order.
"""
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)]
def generate_df(js_dict, naming, value="value"):
"""Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id.'
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data.
"""
values = []
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict, value=value)
output = pd.DataFrame([category + [values[i]]
for i, category in
enumerate(get_df_row(dimensions, naming))])
output.columns = dim_names + [value]
output.index = range(0, len(values))
return output
def from_json_stat(datasets, naming='label', value='value'):
"""Decode JSON-stat formatted data into pandas.DataFrame object.
Args:
datasets(OrderedDict, list): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(), for example.\
Both List and OrderedDict are accepted \
as inputs.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id'.Defaults to 'label'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
results(list): list of pandas.DataFrame with imported data.
"""
warnings.warn(
"Shouldn't use this function anymore! Now use read() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
for idx, element in enumerate(datasets):
for dataset in element:
js_dict = datasets[idx][dataset]
results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
isinstance(datasets, Dataset):
if 'class' in datasets:
if datasets['class'] == 'dataset':
js_dict = datasets
results.append(generate_df(js_dict, naming, value))
else: # 1.00 bundle type
for dataset in datasets:
js_dict = datasets[dataset]
results.append(generate_df(js_dict, naming, value))
return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
"""Encode pandas.DataFrame object into JSON-stat format. The DataFrames
must have exactly one value column.
Args:
df(pandas.DataFrame): pandas data frame (or list of data frames) to
encode.
value (string, optional): name of the value column. Defaults to 'value'.
output(string): accepts two values: 'list' or 'dict'. Produce list of\
dicts or dict of dicts as output.
version(string): desired json-stat version. 2.0 is preferred now.\
Apart from this, only older 1.3 format is accepted,
which is the default parameter in order to preserve
backwards compatibility.
Returns:
output(string): String with JSON-stat object.
"""
warnings.warn(
"Shouldn't use this function anymore! Now use write() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
data = []
if output == 'list':
result = []
elif output == 'dict':
result = OrderedDict({})
if isinstance(input_df, pd.DataFrame):
data.append(input_df)
else:
data = input_df
for row, dataframe in enumerate(data):
dims = data[row].filter([item for item in data[row].columns.values
if item not in value])
if len(dims.columns.values) != len(set(dims.columns.values)):
raise ValueError('Non-value columns must constitute a unique ID')
dim_names = list(dims)
categories = [{to_int(i):
{"label": to_str(i),
"category":
{"index":
OrderedDict([(to_str(j), to_int(k))
for k, j in enumerate(
uniquify(dims[i]))]),
"label":
OrderedDict([(to_str(j), to_str(j))
for k, j in enumerate(
uniquify(dims[i]))])}}}
for i in dims.columns.values]
if float(version) >= 2.0:
dataset = {"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}
dataset["version"] = version
dataset["class"] = "dataset"
for category in categories:
dataset["dimension"].update(category)
dataset.update({"id": dim_names})
dataset.update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dimension"].update(category)
else:
dataset = {"dataset" +
str(row + 1):
{"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}}
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
dataset["dataset" + str(row + 1)][
"dimension"].update({"id": dim_names})
dataset["dataset" + str(row + 1)][
"dimension"].update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
if output == 'list':
result.append(dataset)
elif output == 'dict':
result.update(dataset)
else:
result = None
return json.dumps(result, cls=NumpyEncoder)
def request(path):
"""Send a request to a given URL accepting JSON format and return a \
deserialized Python object.
Args:
path (str): The URI to be requested.
Returns:
response: Deserialized JSON Python object.
Raises:
HTTPError: the HTTP error returned by the requested server.
InvalidURL: an invalid URL has been requested.
Exception: generic exception.
"""
headers = {'Accept': 'application/json'}
try:
requested_object = requests.get(path, headers=headers)
requested_object.raise_for_status()
except requests.exceptions.HTTPError as exception:
LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
str(exception.response.status_code) + ' ' +
str(exception.response.reason) + ' ' + str(path))
raise
except requests.exceptions.InvalidURL as exception:
LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
raise
except Exception:
import traceback
LOGGER.error('Generic exception: ' + traceback.format_exc())
raise
else:
response = requested_object.json()
return response
class Dataset(OrderedDict):
"""A class representing a JSONstat dataset.
"""
def __init__(self, *args, **kwargs):
super(Dataset, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file or
OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON file, a JSON string,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dataset populated with data.
"""
if isinstance(data, pd.DataFrame):
return cls((json.loads(
to_json_stat(data, output='dict', version='2.0'),
object_pairs_hook=OrderedDict)))
elif isinstance(data, OrderedDict):
return cls(data)
elif (isinstance(data, basestring)
and data.startswith(("http://", "https://",
"ftp://", "ftps://"))):
# requests will do the rest...
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'. Default to
'jsonstat'.
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return from_json_stat(self)[0]
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
def get_dimension_index(self, name, value):
"""Converts a dimension ID string and a categody ID string into the \
numeric index of that category in that dimension
Args:
name(string): ID string of the dimension.
value(string): ID string of the category.
Returns:
ndx[value](int): index of the category in the dimension.
"""
if 'index' not in self.get('dimension', {}). \
get(name, {}).get('category', {}):
return 0
ndx = self['dimension'][name]['category']['index']
if isinstance(ndx, list):
return ndx.index(value)
else:
return ndx[value]
def get_dimension_indices(self, query):
"""Converts a dimension/category list of dicts into a list of \
dimensions’ indices.
Args:
query(list): dimension/category list of dicts.
Returns:
indices(list): list of dimensions' indices.
"""
ids = self['id'] if self.get('id') else self['dimension']['id']
indices = []
for idx, id in enumerate(ids):
indices.append(self.get_dimension_index(id,
[d.get(id) for d in query
if id in d][0]))
return indices
def get_value_index(self, indices):
"""Converts a list of dimensions’ indices into a numeric value index.
Args:
indices(list): list of dimension's indices.
Returns:
num(int): numeric value index.
"""
size = self['size'] if self.get('size') else self['dimension']['size']
ndims = len(size)
mult = 1
num = 0
for idx, dim in enumerate(size):
mult *= size[ndims - idx] if (idx > 0) else 1
num += mult * indices[ndims - idx - 1]
return num
def get_value_by_index(self, index):
"""Converts a numeric value index into its data value.
Args:
index(int): numeric value index.
Returns:
self['value'][index](float): Numeric data value.
"""
return self['value'][index]
def get_value(self, query):
"""Converts a dimension/category list of dicts into a data value \
in three steps.
Args:
query(list): list of dicts with the desired query.
Returns:
value(float): numeric data value.
"""
indices = self.get_dimension_indices(query)
index = self.get_value_index(indices)
value = self.get_value_by_index(index)
return value
class Dimension(OrderedDict):
"""A class representing a JSONstat dimension.
"""
def __init__(self, *args, **kwargs):
super(Dimension, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL, Dataframe, JSON string, JSON file
or OrderedDict.
Args:
data: can be a Pandas Dataframe, a JSON string, a JSON file,
an OrderedDict or a URL pointing to a JSONstat file.
Returns:
An object of class Dimension populated with data.
"""
if isinstance(data, pd.DataFrame):
output = OrderedDict({})
output['version'] = '2.0'
output['class'] = 'dimension'
[label] = [x for x in list(data.columns.values) if
x not in ['id', 'index']]
output['label'] = label
output['category'] = OrderedDict({})
output['category']['index'] = data.id.tolist()
output['category']['label'] = OrderedDict(
zip(data.id.values, data[label].values))
return cls(output)
elif isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring) and data.startswith(("http://",
"https://",
"ftp://",
"ftps://")):
return cls(request(data))
elif isinstance(data,basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Dataset object to JSONstat or Pandas Dataframe.
Args:
output(string): can accept 'jsonstat' or 'dataframe'
Returns:
Serialized JSONstat or a Pandas Dataframe,depending on the \
'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(OrderedDict(self), cls=NumpyEncoder)
elif output == 'dataframe':
return get_dim_label(self, self['label'], 'dimension')
else:
raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
"""A class representing a JSONstat collection.
"""
def __init__(self, *args, **kwargs):
super(Collection, self).__init__(*args, **kwargs)
@classmethod
def read(cls, data):
"""Reads data from URL or OrderedDict.
Args:
data: can be a URL pointing to a JSONstat file, a JSON string
or an OrderedDict.
Returns:
An object of class Collection populated with data.
"""
if isinstance(data, OrderedDict):
return cls(data)
elif isinstance(data, basestring)\
and data.startswith(("http://", "https://", "ftp://", "ftps://")):
return cls(request(data))
elif isinstance(data, basestring):
try:
json_dict = json.loads(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
else:
try:
json_dict = json.load(data, object_pairs_hook=OrderedDict)
return cls(json_dict)
except ValueError:
raise
def write(self, output='jsonstat'):
"""Writes data from a Collection object to JSONstat or list of \
Pandas Dataframes.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
def get(self, element):
"""Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter.
"""
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | uniquify | python | def uniquify(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if x not in seen and not seen_add(x)] | Return unique values in a list in the original order. See: \
http://www.peterbe.com/plog/uniqifiers-benchmark
Args:
seq (list): original list.
Returns:
list: list without duplicates preserving original order. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L352-L366 | null | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
"""Custom JSON encoder class for Numpy data types.
"""
def default(self, obj):
if isinstance(obj, np.integer) or isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(NumpyEncoder, self).default(obj)
def to_int(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
return int(variable)
except ValueError:
return variable
def to_str(variable):
"""Convert variable to integer or string depending on the case.
Args:
variable (string): a string containing a real string or an integer.
Returns:
variable(int, string): an integer or a string, depending on the content\
of variable.
"""
try:
int(variable)
return str(variable)
except ValueError:
return variable
def check_version_2(dataset):
"""Checks if json-stat version attribute exists and is equal or greater \
than 2.0 for a given dataset.
Args:
dataset (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
Returns:
bool: True if version exists and is equal or greater than 2.0, \
False otherwise. For datasets without the version attribute, \
always return False.
"""
if float(dataset.get('version')) >= 2.0 \
if dataset.get('version') else False:
return True
else:
return False
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list)
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label-based information from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata
                        (or a single dimension object, see ``input``).
        dim (string): dimension name obtained from JSON file.
        input (string, optional): 'dataset' when js_dict is a whole
                                  dataset, 'dimension' when js_dict is a
                                  single dimension object. Defaults to
                                  'dataset'.

    Returns:
        dim_label(pandas.DataFrame): DataFrame with label-based dimension
                                     data, ordered by the json-stat index.

    Raises:
        ValueError: if input is neither 'dataset' nor 'dimension'.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No labels provided: reuse the category ids as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # BUG FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values() is the supported way to sort on a column.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(
        by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index-based information from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
                                      data, ordered by the json-stat index.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # Single-category dimension: synthesize a one-row index from the
        # label information.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # Index given as an ordered list of category ids.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            # Index given as a mapping of category id -> position.
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # BUG FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values() is the supported way to sort on a column.
    dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        values (list): dense list of dataset values; positions missing
                       from a sparse dict encoding are None.
    """
    values = js_dict[value]
    if isinstance(values, list):
        # NOTE: the previous guard `type(values[0]) is not dict or tuple`
        # was a tautology (`or tuple` is always truthy), so every list was
        # returned unchanged. That behavior is kept, and empty lists no
        # longer raise IndexError on values[0].
        return values
    # A dict encodes sparse values keyed by (string) flat position.
    values = {int(key): val for (key, val) in values.items()}
    # Total cell count is the product of the dimension sizes; json-stat
    # 2.0 keeps 'size' at the top level, 1.x nests it under 'dimension'.
    if js_dict.get('size'):
        max_val = np.prod(np.array(js_dict['size']))
    else:
        max_val = np.prod(np.array(js_dict['dimension']['size']))
    # Densify: unset positions stay None.
    vals = int(max_val) * [None]
    for (key, val) in values.items():
        vals[key] = val
    return vals
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Walks the dimension data frames depth-first, building one row of
    category values per combination (cartesian product of all dimensions)
    and yielding it once a value has been chosen for every dimension.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels \
                           generated by get_dim_label or get_dim_index \
                           methods.
        naming (string, optional): dimension naming. Possible values: \
                                   'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used \
                 in the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row, \
                       except for the value column. Default is empty, \
                       it's used in the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value \
              column. NOTE(review): the same list object is yielded every \
              time and mutated afterwards; consume (copy) each row before \
              advancing the generator.
    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        # Tentatively extend the partial row with this category value.
        record.append(dimension)
        if len(record) == len(dimensions):
            # One value per dimension chosen: a complete row.
            yield record
        if i + 1 < len(dimensions):
            # Recurse into the next dimension to complete the row.
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Backtrack: remove this dimension's value before trying the
            # next category at this level.
            record.pop()
def generate_df(js_dict, naming, value="value"):
    """Decode JSON-stat dict into pandas.DataFrame object. Helper method \
    that should be called inside from_json_stat().

    Args:
        js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
                              previously deserialized into a python object \
                              by json.load() or json.loads(), for example.
        naming(string): dimension naming. Possible values: 'label' or 'id.'
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        output(DataFrame): pandas.DataFrame with converted data.
    """
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Each generated category combination becomes one row; its data value
    # is appended as the last column.
    rows = [category + [values[pos]]
            for pos, category in
            enumerate(get_df_row(dimensions, naming))]
    output = pd.DataFrame(rows)
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame object.

    Args:
        datasets(OrderedDict, list): data in JSON-stat format, previously \
                                     deserialized to a python object by \
                                     json.load() or json.loads(). Both \
                                     List and OrderedDict are accepted as \
                                     inputs.
        naming(string, optional): dimension naming. Possible values:
                                  'label' or 'id'. Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        results(list): list of pandas.DataFrame with imported data.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # List of bundles: each element maps dataset names to datasets.
        for element in datasets:
            for name in element:
                results.append(generate_df(element[name], naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # json-stat 2.0: the dict itself is a dataset.
            if datasets['class'] == 'dataset':
                results.append(generate_df(datasets, naming, value))
        else:  # 1.00 bundle type
            for name in datasets:
                results.append(generate_df(datasets[name], naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data
                                    frames) to encode.
        value (string, optional): name of the value column. Defaults to
                                  'value'.
        output(string): accepts two values: 'list' or 'dict'. Produce list \
                        of dicts or dict of dicts as output.
        version(string): desired json-stat version. 2.0 is preferred now.\
                         Apart from this, only older 1.3 format is accepted,
                         which is the default parameter in order to preserve
                         backwards compatibility.

    Returns:
        output(string): String with JSON-stat object.

    Raises:
        ValueError: if non-value columns do not form a unique identifier.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    # NOTE(review): any other value of ``output`` leaves ``result`` unbound
    # until the loop's final else branch — a NameError on non-empty input.
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # NOTE(review): ``item not in value`` is a substring test because
        # ``value`` is a string; a dimension column whose name is a
        # substring of the value column name would be silently dropped.
        # Presumably ``item != value`` was intended — confirm.
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item not in value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One category object per dimension: index (id -> position) and
        # label (id -> id) built from the column's unique values in order.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # json-stat 2.0: flat dataset with top-level id/size lists.
            # NaN values are emitted as JSON null.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
            # NOTE(review): this second update pass over ``categories`` is
            # redundant — it re-applies the same updates as above.
            for category in categories:
                dataset["dimension"].update(category)
        else:
            # Legacy 1.x bundle: datasets keyed "dataset1", "dataset2", ...
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
            # NOTE(review): redundant second pass, same as in the 2.0 branch.
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    try:
        requested_object = requests.get(path,
                                        headers={'Accept':
                                                 'application/json'})
        # Turn non-2xx responses into HTTPError.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # Log the calling function name plus status/reason/url context.
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    return requested_object.json()
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    Behaves like an OrderedDict holding the deserialized JSON-stat
    structure, adding read/write helpers and query methods that resolve
    dimension/category combinations into data values.
    """
    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Encode the dataframe as JSON-stat 2.0, then re-parse so the
            # resulting Dataset holds the canonical dict structure.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # Plain string: assume a serialized JSON document.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'. Default to
                            'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the \
            'output' parameter.

        Raises:
            ValueError: if output is not an allowed value.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # from_json_stat always returns a list; one Dataset maps to
            # exactly one dataframe.
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the \
        numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            ndx[value](int): index of the category in the dimension.
        """
        # Dimensions without an explicit index hold a single category at 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        if isinstance(ndx, list):
            # Index given as an ordered list of category ids.
            return ndx.index(value)
        else:
            # Index given as a mapping of category id -> position.
            return ndx[value]
    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of \
        dimensions' indices.

        Args:
            query(list): dimension/category list of dicts, e.g. \
                         [{'sex': 'M'}, {'year': '2014'}].

        Returns:
            indices(list): list of dimensions' indices, one per dataset \
                           dimension, in dataset order.
        """
        # json-stat 2.0 keeps 'id' at the top level; 1.x nests it under
        # 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the category the query specifies for this dimension.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices
    def get_value_index(self, indices):
        """Converts a list of dimensions' indices into a numeric value index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        # Row-major flattening: the last dimension varies fastest, so each
        # step multiplies in the size of the next-faster dimension.
        mult = 1
        num = 0
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num
    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            self['value'][index](float): Numeric data value.
        """
        return self['value'][index]
    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value \
        in three steps.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    Behaves like an OrderedDict holding the deserialized JSON-stat
    dimension structure, adding read/write helpers.
    """
    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension object from an id/label
            # dataframe.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            # The label column is whichever one is not 'id' or 'index';
            # list unpacking enforces that exactly one such column exists.
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            return cls(request(data))
        elif isinstance(data,basestring):
            # Plain string: assume a serialized JSON document.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the \
            'output' parameter.

        Raises:
            ValueError: if output is not an allowed value.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # Delegate to the module-level helper in 'dimension' mode.
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    Behaves like an OrderedDict holding the deserialized JSON-stat
    collection structure, adding read/write and element-access helpers.
    """
    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://", "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # Plain string: assume a serialized JSON document.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of \
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending \
            on the 'output' parameter.

        Raises:
            ValueError: if output is not an allowed value.
        """
        if output == 'jsonstat':
            # NOTE(review): unlike Dataset/Dimension.write, this does not
            # pass cls=NumpyEncoder; numpy values here would fail to
            # serialize — confirm whether that is intentional.
            return json.dumps(self)
        elif output == 'dataframe_list':
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")
    def get(self, element):
        """Gets the item at position ``element`` of a collection, wrapped \
        in an object of the corresponding class (Dataset, Collection or \
        Dimension).

        NOTE(review): this overrides OrderedDict.get() with incompatible
        semantics (positional item access instead of key lookup with
        default).

        Args:
            element(int): position of the item in the collection's \
                          link/item list.

        Returns:
            A Dataset, Collection or Dimension populated from the item's \
            href.

        Raises:
            ValueError: if the item class is not an allowed value.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | generate_df | python | def generate_df(js_dict, naming, value="value"):
values = []
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict, value=value)
output = pd.DataFrame([category + [values[i]]
for i, category in
enumerate(get_df_row(dimensions, naming))])
output.columns = dim_names + [value]
output.index = range(0, len(values))
return output | Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id.'
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L369-L393 | [
"def get_dimensions(js_dict, naming):\n \"\"\"Get dimensions from input data.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n naming (string, optional): dimension naming. Possible values: 'label' \\\n or 'id'.\n\n Returns:\n dimensions (list): list of pandas data frames with dimension \\\n category data.\n dim_names (list): list of strings with dimension names.\n \"\"\"\n\n dimensions = []\n dim_names = []\n if check_version_2(js_dict):\n dimension_dict = js_dict\n else:\n dimension_dict = js_dict['dimension']\n for dim in dimension_dict['id']:\n dim_name = js_dict['dimension'][dim]['label']\n if not dim_name:\n dim_name = dim\n if naming == 'label':\n dim_label = get_dim_label(js_dict, dim)\n dimensions.append(dim_label)\n dim_names.append(dim_name)\n else:\n dim_index = get_dim_index(js_dict, dim)\n dimensions.append(dim_index)\n dim_names.append(dim)\n return dimensions, dim_names\n",
"def get_values(js_dict, value='value'):\n \"\"\"Get values from input data.\n\n Args:\n js_dict (dict): dictionary containing dataset data and metadata.\n value (string, optional): name of the value column. Defaults to 'value'.\n\n Returns:\n values (list): list of dataset values.\n\n \"\"\"\n\n values = js_dict[value]\n if type(values) is list:\n if type(values[0]) is not dict or tuple:\n return values\n # being not a list of dicts or tuples leaves us with a dict...\n values = {int(key): value for (key, value) in values.items()}\n\n if js_dict.get('size'):\n max_val = np.prod(np.array((js_dict['size'])))\n else:\n max_val = np.prod(np.array((js_dict['dimension']['size'])))\n vals = max_val * [None]\n for (key, value) in values.items():\n vals[key] = value\n\n values = vals\n return values\n",
"def get_df_row(dimensions, naming='label', i=0, record=None):\n \"\"\"Generate row dimension values for a pandas dataframe.\n\n Args:\n dimensions (list): list of pandas dataframes with dimension labels \\\n generated by get_dim_label or get_dim_index methods.\n naming (string, optional): dimension naming. Possible values: 'label' \\\n or 'id'.\n i (int): dimension list iteration index. Default is 0, it's used in the \\\n recursive calls to the method.\n record (list): list of values representing a pandas dataframe row, \\\n except for the value column. Default is empty, it's used \\\n in the recursive calls to the method.\n\n Yields:\n list: list with pandas dataframe column values except for value column\n\n \"\"\"\n\n check_input(naming)\n if i == 0 or record is None:\n record = []\n for dimension in dimensions[i][naming]:\n record.append(dimension)\n if len(record) == len(dimensions):\n yield record\n\n if i + 1 < len(dimensions):\n for row in get_df_row(dimensions, naming, i + 1, record):\n yield row\n if len(record) == i + 1:\n record.pop()\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
    """Custom JSON encoder that converts Numpy scalar and array types to
    their native Python equivalents.
    """

    def default(self, obj):
        # Numpy integers/floats are not JSON serializable out of the box.
        if isinstance(obj, (np.integer, np.int64)):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Defer to the base class, which raises TypeError for unknowns.
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Convert variable to integer or string depending on the case.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the \
                               content of variable.
    """
    try:
        converted = int(variable)
    except ValueError:
        # Not an integer literal: keep the original value untouched.
        return variable
    return converted
def to_str(variable):
    """Convert variable to integer or string depending on the case.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the \
                               content of variable.
    """
    try:
        int(variable)
    except ValueError:
        # Not an integer-like value: keep the original value untouched.
        return variable
    # Integer-like values are normalized to their string form.
    return str(variable)
def check_version_2(dataset):
    """Checks if json-stat version attribute exists and is equal or \
    greater than 2.0 for a given dataset.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously \
                               deserialized to a python object by \
                               json.load() or json.loads().

    Returns:
        bool: True if version exists and is equal or greater than 2.0, \
              False otherwise. For datasets without the version attribute, \
              always return False.
    """
    version = dataset.get('version')
    # Readability fix: the original used a conditional expression nested
    # inside the if-condition, which obscured this simple check.
    if version:
        return float(version) >= 2.0
    return False
def unnest_collection(collection, df_list):
"""Unnest collection structure extracting all its datasets and converting \
them to Pandas Dataframes.
Args:
collection (OrderedDict): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(),
df_list (list): list variable which will contain the converted \
datasets.
Returns:
Nothing.
"""
for item in collection['link']['item']:
if item['class'] == 'dataset':
df_list.append(Dataset.read(item['href']).write('dataframe'))
elif item['class'] == 'collection':
nested_collection = request(item['href'])
unnest_collection(nested_collection, df_list)
def check_input(naming):
"""Check and validate input params.
Args:
naming (string): a string containing the naming type (label or id).
Returns:
Nothing
Raises:
ValueError: if the parameter is not in the allowed list.
"""
if naming not in ['label', 'id']:
raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
"""Get dimensions from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
Returns:
dimensions (list): list of pandas data frames with dimension \
category data.
dim_names (list): list of strings with dimension names.
"""
dimensions = []
dim_names = []
if check_version_2(js_dict):
dimension_dict = js_dict
else:
dimension_dict = js_dict['dimension']
for dim in dimension_dict['id']:
dim_name = js_dict['dimension'][dim]['label']
if not dim_name:
dim_name = dim
if naming == 'label':
dim_label = get_dim_label(js_dict, dim)
dimensions.append(dim_label)
dim_names.append(dim_name)
else:
dim_index = get_dim_index(js_dict, dim)
dimensions.append(dim_index)
dim_names.append(dim)
return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
"""Get label from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_label(pandas.DataFrame): DataFrame with label-based dimension data.
"""
if input == 'dataset':
input = js_dict['dimension'][dim]
label_col = 'label'
elif input == 'dimension':
label_col = js_dict['label']
input = js_dict
else:
raise ValueError
try:
dim_label = input['category']['label']
except KeyError:
dim_index = get_dim_index(js_dict, dim)
dim_label = pd.concat([dim_index['id'],
dim_index['id']],
axis=1)
dim_label.columns = ['id', 'label']
else:
dim_label = pd.DataFrame(list(zip(dim_label.keys(),
dim_label.values())),
index=dim_label.keys(),
columns=['id', label_col])
# index must be added to dim label so that it can be sorted
try:
dim_index = input['category']['index']
except KeyError:
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_label = pd.merge(dim_label, dim_index, on='id').sort_index(by='index')
return dim_label
def get_dim_index(js_dict, dim):
"""Get index from a given dimension.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
dim (string): dimension name obtained from JSON file.
Returns:
dim_index (pandas.DataFrame): DataFrame with index-based dimension data.
"""
try:
dim_index = js_dict['dimension'][dim]['category']['index']
except KeyError:
dim_label = get_dim_label(js_dict, dim)
dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
index=[0],
columns=['id', 'index'])
else:
if type(dim_index) is list:
dim_index = pd.DataFrame(list(zip(dim_index,
range(0, len(dim_index)))),
index=dim_index, columns=['id', 'index'])
else:
dim_index = pd.DataFrame(list(zip(dim_index.keys(),
dim_index.values())),
index=dim_index.keys(),
columns=['id', 'index'])
dim_index = dim_index.sort_index(by='index')
return dim_index
def get_values(js_dict, value='value'):
"""Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values.
"""
values = js_dict[value]
if type(values) is list:
if type(values[0]) is not dict or tuple:
return values
# being not a list of dicts or tuples leaves us with a dict...
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
max_val = np.prod(np.array((js_dict['size'])))
else:
max_val = np.prod(np.array((js_dict['dimension']['size'])))
vals = max_val * [None]
for (key, value) in values.items():
vals[key] = value
values = vals
return values
def get_df_row(dimensions, naming='label', i=0, record=None):
"""Generate row dimension values for a pandas dataframe.
Args:
dimensions (list): list of pandas dataframes with dimension labels \
generated by get_dim_label or get_dim_index methods.
naming (string, optional): dimension naming. Possible values: 'label' \
or 'id'.
i (int): dimension list iteration index. Default is 0, it's used in the \
recursive calls to the method.
record (list): list of values representing a pandas dataframe row, \
except for the value column. Default is empty, it's used \
in the recursive calls to the method.
Yields:
list: list with pandas dataframe column values except for value column
"""
check_input(naming)
if i == 0 or record is None:
record = []
for dimension in dimensions[i][naming]:
record.append(dimension)
if len(record) == len(dimensions):
yield record
if i + 1 < len(dimensions):
for row in get_df_row(dimensions, naming, i + 1, record):
yield row
if len(record) == i + 1:
record.pop()
def uniquify(seq):
    """Return unique values in a list in the original order. See: \
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    result = []
    for element in seq:
        # Keep only the first occurrence of each value.
        if element not in seen:
            seen.add(element)
            result.append(element)
    return result
def from_json_stat(datasets, naming='label', value='value'):
"""Decode JSON-stat formatted data into pandas.DataFrame object.
Args:
datasets(OrderedDict, list): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(), for example.\
Both List and OrderedDict are accepted \
as inputs.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id'.Defaults to 'label'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
results(list): list of pandas.DataFrame with imported data.
"""
warnings.warn(
"Shouldn't use this function anymore! Now use read() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
for idx, element in enumerate(datasets):
for dataset in element:
js_dict = datasets[idx][dataset]
results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
isinstance(datasets, Dataset):
if 'class' in datasets:
if datasets['class'] == 'dataset':
js_dict = datasets
results.append(generate_df(js_dict, naming, value))
else: # 1.00 bundle type
for dataset in datasets:
js_dict = datasets[dataset]
results.append(generate_df(js_dict, naming, value))
return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data
                                    frames) to encode.
        value (string, optional): name of the value column. Defaults to
                                  'value'.
        output(string): accepts two values: 'list' or 'dict'. Produce list
                        of dicts or dict of dicts as output.
        version(string): desired json-stat version. 2.0 is preferred now.
                         Apart from this, only older 1.3 format is accepted,
                         which is the default parameter in order to preserve
                         backwards compatibility.

    Returns:
        output(string): String with JSON-stat object.

    Raises:
        ValueError: if the non-value columns do not form a unique ID.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Dimension columns are all columns except the value column.
        # BUGFIX: the original test was ``item not in value``, a substring
        # check that wrongly dropped any column whose name happens to be a
        # substring of the value column's name (e.g. 'al' vs 'value').
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category: {index, label}}} entry per dimension;
        # category order follows first appearance in the column.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                             OrderedDict([(to_str(j), to_str(j))
                                          for k, j in enumerate(
                                              uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: flat dataset object with class/version markers;
            # NaN data values become JSON null.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            # BUGFIX: the original executed this update loop twice; updating
            # with the same keys is idempotent, so one pass suffices.
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: datasets wrapped under "dataset<N>" keys, with
            # id/size nested inside "dimension".
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            # BUGFIX: single pass here too (the original repeated this loop).
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    # Ask the server explicitly for a JSON representation.
    headers = {'Accept': 'application/json'}
    try:
        requested_object = requests.get(path, headers=headers)
        # Turn 4xx/5xx responses into HTTPError exceptions.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # inspect.stack()[0][3] is the name of this function ('request').
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        # NOTE(review): InvalidURL may not expose a '.reason' attribute on
        # every requests version -- confirm before relying on this message.
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        # Local import keeps traceback out of module scope for normal runs.
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        # Only reached when no exception occurred: decode the JSON payload.
        response = requested_object.json()
        return response
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    An ordered mapping holding one deserialized JSON-stat 'dataset'
    response, with helpers to convert to/from pandas and to look up a
    single cell value by dimension/category.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat 2.0 encoder.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # Plain JSON string.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is assumed to be a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'. Default to
                            'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # from_json_stat returns a list of frames; a Dataset decodes to
            # exactly one.
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            ndx[value](int): index of the category in the dimension.
        """
        # Dimensions without an explicit category index hold a single
        # category, which lives at position 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        if isinstance(ndx, list):
            # List form: the position in the list is the index.
            return ndx.index(value)
        else:
            # Mapping form: {category_id: index}.
            return ndx[value]

    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query(list): dimension/category list of dicts.

        Returns:
            indices(list): list of dimensions' indices.
        """
        # JSON-stat 2.0 keeps 'id' at top level; 1.x nests it in 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # First query entry mentioning this dimension wins.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Converts a list of dimensions' indices into a numeric value index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Row-major linearization: each dimension's index is weighted by the
        # running product of the sizes of all later (faster-varying)
        # dimensions, accumulated from the last dimension backwards.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            self['value'][index](float): Numeric data value.
        """
        return self['value'][index]

    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value
        in three steps.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    An ordered mapping holding one deserialized JSON-stat 'dimension'
    response, convertible to and from a pandas id/label dataframe.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension from an id/label dataframe:
            # the label column is whichever column is neither 'id' nor
            # 'index'.
            [label] = [column for column in list(data.columns.values)
                       if column not in ['id', 'index']]
            dimension = OrderedDict({})
            dimension['version'] = '2.0'
            dimension['class'] = 'dimension'
            dimension['label'] = label
            dimension['category'] = OrderedDict({})
            dimension['category']['index'] = data.id.tolist()
            dimension['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(dimension)
        if isinstance(data, OrderedDict):
            return cls(data)
        if isinstance(data, basestring):
            if data.startswith(("http://", "https://", "ftp://", "ftps://")):
                # Remote resource: fetch and deserialize.
                return cls(request(data))
            # Plain JSON string.
            return cls(json.loads(data, object_pairs_hook=OrderedDict))
        # Anything else is assumed to be a readable file-like object.
        return cls(json.load(data, object_pairs_hook=OrderedDict))

    def write(self, output='jsonstat'):
        """Write data from a Dimension object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        if output == 'dataframe':
            # 'dimension' mode tells get_dim_label to read category data
            # from this object directly.
            return get_dim_label(self, self['label'], 'dimension')
        raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    An ordered mapping holding one deserialized JSON-stat 'collection'
    response; items are reachable through self['link']['item'].
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, JSON string, JSON file or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        if isinstance(data, basestring):
            if data.startswith(("http://", "https://", "ftp://", "ftps://")):
                # Remote resource: fetch and deserialize.
                return cls(request(data))
            # Plain JSON string.
            return cls(json.loads(data, object_pairs_hook=OrderedDict))
        # Anything else is assumed to be a readable file-like object.
        return cls(json.load(data, object_pairs_hook=OrderedDict))

    def write(self, output='jsonstat'):
        """Write data from a Collection object to JSONstat or a list of
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending on
            the 'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(self)
        if output == 'dataframe_list':
            # One dataframe per dataset found anywhere in the (possibly
            # nested) collection; each item is fetched over the network.
            dataframes = []
            unnest_collection(self, dataframes)
            return dataframes
        raise ValueError(
            "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Get the ith element of a collection as an object of the
        corresponding class (Dataset, Collection or Dimension).

        Args:
            element: index of the item within self['link']['item'].

        Returns:
            A Dataset, Collection or Dimension read from the item's href.
        """
        item = self['link']['item'][element]
        if item['class'] == 'dataset':
            return Dataset.read(item['href'])
        if item['class'] == 'collection':
            return Collection.read(item['href'])
        if item['class'] == 'dimension':
            return Dimension.read(item['href'])
        raise ValueError(
            "Class not allowed. Please use dataset, collection or "
            "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | from_json_stat | python | def from_json_stat(datasets, naming='label', value='value'):
warnings.warn(
"Shouldn't use this function anymore! Now use read() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
check_input(naming)
results = []
if type(datasets) is list:
for idx, element in enumerate(datasets):
for dataset in element:
js_dict = datasets[idx][dataset]
results.append(generate_df(js_dict, naming, value))
elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
isinstance(datasets, Dataset):
if 'class' in datasets:
if datasets['class'] == 'dataset':
js_dict = datasets
results.append(generate_df(js_dict, naming, value))
else: # 1.00 bundle type
for dataset in datasets:
js_dict = datasets[dataset]
results.append(generate_df(js_dict, naming, value))
return results | Decode JSON-stat formatted data into pandas.DataFrame object.
Args:
datasets(OrderedDict, list): data in JSON-stat format, previously \
deserialized to a python object by \
json.load() or json.loads(), for example.\
Both List and OrderedDict are accepted \
as inputs.
naming(string, optional): dimension naming. Possible values: 'label'
or 'id'.Defaults to 'label'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
results(list): list of pandas.DataFrame with imported data. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L396-L437 | [
"def check_input(naming):\n \"\"\"Check and validate input params.\n\n Args:\n naming (string): a string containing the naming type (label or id).\n\n Returns:\n Nothing\n\n Raises:\n ValueError: if the parameter is not in the allowed list.\n\n \"\"\"\n\n if naming not in ['label', 'id']:\n raise ValueError('naming must be \"label\" or \"id\"')\n",
"def generate_df(js_dict, naming, value=\"value\"):\n \"\"\"Decode JSON-stat dict into pandas.DataFrame object. Helper method \\\n that should be called inside from_json_stat().\n\n Args:\n js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \\\n previously deserialized into a python object by \\\n json.load() or json.loads(), for example.\n naming(string): dimension naming. Possible values: 'label' or 'id.'\n value (string, optional): name of the value column. Defaults to 'value'.\n\n Returns:\n output(DataFrame): pandas.DataFrame with converted data.\n\n \"\"\"\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output\n"
] | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that understands NumPy scalar and array types."""

    def default(self, obj):
        # Convert NumPy scalars/arrays to native Python equivalents so the
        # standard encoder can serialize them; defer to the base class for
        # anything else (which raises TypeError as usual).
        if isinstance(obj, (np.integer, np.int64)):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Cast *variable* to int when possible, otherwise return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
                               content of variable.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
def to_str(variable):
    """Return the string form of *variable* when it represents an integer,
    otherwise return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
                               content of variable.
    """
    try:
        int(variable)
    except ValueError:
        return variable
    return str(variable)
def check_version_2(dataset):
    """Return True when the dataset declares JSON-stat version >= 2.0.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
                               deserialized to a python object by
                               json.load() or json.loads().

    Returns:
        bool: True if a 'version' attribute exists and is equal or greater
              than 2.0; False otherwise (including when the attribute is
              missing or empty).
    """
    version = dataset.get('version')
    if not version:
        return False
    return float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Unnest collection structure extracting all its datasets and converting \
    them to Pandas Dataframes.

    Args:
        collection (OrderedDict): data in JSON-stat format, previously \
                                  deserialized to a python object by \
                                  json.load() or json.loads(),
        df_list (list): list variable which will contain the converted \
                        datasets.

    Returns:
        Nothing.
    """
    # Walks the collection's items, appending one dataframe per dataset and
    # recursing into nested collections. Mutates df_list in place; note that
    # every item's 'href' is fetched over the network.
    for item in collection['link']['item']:
        if item['class'] == 'dataset':
            df_list.append(Dataset.read(item['href']).write('dataframe'))
        elif item['class'] == 'collection':
            nested_collection = request(item['href'])
            unnest_collection(nested_collection, df_list)
def check_input(naming):
    """Validate the *naming* parameter.

    Args:
        naming (string): a string containing the naming type
                         (label or id).

    Returns:
        Nothing

    Raises:
        ValueError: if the parameter is not in the allowed list.
    """
    allowed = ('label', 'id')
    if naming not in allowed:
        raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Get dimensions from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string, optional): dimension naming. Possible values: 'label' \
                                   or 'id'.

    Returns:
        dimensions (list): list of pandas data frames with dimension \
                           category data.
        dim_names (list): list of strings with dimension names.
    """
    dimensions = []
    dim_names = []
    # JSON-stat 2.0 keeps the dimension 'id' list at the top level; 1.x
    # nests it under 'dimension'.
    if check_version_2(js_dict):
        dimension_dict = js_dict
    else:
        dimension_dict = js_dict['dimension']
    for dim in dimension_dict['id']:
        dim_name = js_dict['dimension'][dim]['label']
        if not dim_name:
            # Fall back to the dimension id when no label is provided.
            dim_name = dim
        if naming == 'label':
            dim_label = get_dim_label(js_dict, dim)
            dimensions.append(dim_label)
            dim_names.append(dim_name)
        else:
            dim_index = get_dim_index(js_dict, dim)
            dimensions.append(dim_index)
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string, optional): 'dataset' to look the dimension up inside
                                  js_dict['dimension'] (the default), or
                                  'dimension' when js_dict is itself a
                                  dimension object.

    Returns:
        dim_label(pandas.DataFrame): DataFrame with label-based dimension
                                     data ('id', label and 'index' columns,
                                     ordered by 'index').

    Raises:
        ValueError: if *input* is neither 'dataset' nor 'dimension'.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No labels provided: reuse the category ids as their own labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # No explicit index: single category at position 0.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # List form: the position in the list is the index.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            # Mapping form: {category_id: index}.
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # BUGFIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values() is the supported spelling for sorting by a column.
    dim_label = pd.merge(dim_label, dim_index,
                         on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
                                      data ('id' and 'index' columns,
                                      ordered by 'index').
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # No explicit index: fall back to the dimension's single labelled
        # category, at position 0.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        if type(dim_index) is list:
            # List form: the position in the list is the index.
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index,
                                     columns=['id', 'index'])
        else:
            # Mapping form: {category_id: index}.
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # BUGFIX: DataFrame.sort_index(by=...) was removed from pandas;
        # sort_values() is the supported spelling for sorting by a column.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
                                  'value'.

    Returns:
        values (list): list of dataset values.
    """
    values = js_dict[value]
    if type(values) is list:
        # BUGFIX: the original guard ``type(values[0]) is not dict or tuple``
        # was always true ('tuple' is truthy), so every list was returned
        # as-is -- and it raised IndexError on an empty list. Returning the
        # list directly preserves that observable behavior while also
        # handling the empty case.
        return values
    # Otherwise values is a sparse mapping {index: value}: expand it into a
    # dense list sized by the product of the dimension sizes, leaving None
    # in the holes.
    values = {int(key): val for (key, val) in values.items()}
    if js_dict.get('size'):
        # JSON-stat 2.0 keeps 'size' at the top level.
        max_val = np.prod(np.array((js_dict['size'])))
    else:
        # JSON-stat 1.x nests it under 'dimension'.
        max_val = np.prod(np.array((js_dict['dimension']['size'])))
    vals = max_val * [None]
    for (key, val) in values.items():
        vals[key] = val
    return vals
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels \
                           generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values: 'label' \
                                   or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used in the \
                 recursive calls to the method.
        record (list): list of values representing a pandas dataframe row, \
                       except for the value column. Default is empty, it's used \
                       in the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value column
    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    for dimension in dimensions[i][naming]:
        # The same 'record' list object is shared across all recursion
        # levels and mutated in place, so consumers must use each yielded
        # row before advancing the generator (generate_df does: it copies
        # each row via list concatenation immediately).
        record.append(dimension)
        if len(record) == len(dimensions):
            # One value collected per dimension: this is a complete row.
            yield record
        if i + 1 < len(dimensions):
            # Recurse to extend the partial row with the next dimension's
            # categories (cartesian product).
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Backtrack before trying this dimension's next category.
            record.pop()
def uniquify(seq):
    """Return the elements of *seq* with duplicates removed, preserving the
    order of first appearance. See: \
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    mark_seen = seen.add
    result = []
    keep = result.append
    for element in seq:
        if element not in seen:
            mark_seen(element)
            keep(element)
    return result
def generate_df(js_dict, naming, value="value"):
    """Decode JSON-stat dict into pandas.DataFrame object. Helper method \
    that should be called inside from_json_stat().

    Args:
        js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
                              previously deserialized into a python object by \
                              json.load() or json.loads(), for example.
        naming(string): dimension naming. Possible values: 'label' or 'id.'
        value (string, optional): name of the value column. Defaults to 'value'.

    Returns:
        output(DataFrame): pandas.DataFrame with converted data.
    """
    # NOTE: this initial assignment is redundant -- 'values' is reassigned
    # below before its first use.
    values = []
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Each row is one combination of dimension categories (in cartesian
    # product order, as yielded by get_df_row) plus its matching value.
    output = pd.DataFrame([category + [values[i]]
                           for i, category in
                           enumerate(get_df_row(dimensions, naming))])
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df(pandas.DataFrame): pandas data frame (or list of data
                                    frames) to encode.
        value (string, optional): name of the value column. Defaults to
                                  'value'.
        output(string): accepts two values: 'list' or 'dict'. Produce list
                        of dicts or dict of dicts as output.
        version(string): desired json-stat version. 2.0 is preferred now.
                         Apart from this, only older 1.3 format is accepted,
                         which is the default parameter in order to preserve
                         backwards compatibility.

    Returns:
        output(string): String with JSON-stat object.

    Raises:
        ValueError: if the non-value columns do not form a unique ID.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # Dimension columns are all columns except the value column.
        # BUGFIX: the original test was ``item not in value``, a substring
        # check that wrongly dropped any column whose name happens to be a
        # substring of the value column's name (e.g. 'al' vs 'value').
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category: {index, label}}} entry per dimension;
        # category order follows first appearance in the column.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                             OrderedDict([(to_str(j), to_str(j))
                                          for k, j in enumerate(
                                              uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        if float(version) >= 2.0:
            # JSON-stat 2.0: flat dataset object with class/version markers;
            # NaN data values become JSON null.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            # BUGFIX: the original executed this update loop twice; updating
            # with the same keys is idempotent, so one pass suffices.
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # JSON-stat 1.3: datasets wrapped under "dataset<N>" keys, with
            # id/size nested inside "dimension".
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            # BUGFIX: single pass here too (the original repeated this loop).
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
def request(path):
    """Send a request to a given URL accepting JSON format and return a \
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    # Ask the server explicitly for a JSON representation.
    headers = {'Accept': 'application/json'}
    try:
        requested_object = requests.get(path, headers=headers)
        # Turn 4xx/5xx responses into HTTPError exceptions.
        requested_object.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # inspect.stack()[0][3] is the name of this function ('request').
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        # NOTE(review): InvalidURL may not expose a '.reason' attribute on
        # every requests version -- confirm before relying on this message.
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        # Local import keeps traceback out of module scope for normal runs.
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        # Only reached when no exception occurred: decode the JSON payload.
        response = requested_object.json()
        return response
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    An ordered mapping holding one deserialized JSON-stat 'dataset'
    response, with helpers to convert to/from pandas and to look up a
    single cell value by dimension/category.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Read data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat 2.0 encoder.
            encoded = to_json_stat(data, output='dict', version='2.0')
            return cls(json.loads(encoded, object_pairs_hook=OrderedDict))
        if isinstance(data, OrderedDict):
            return cls(data)
        if isinstance(data, basestring):
            if data.startswith(("http://", "https://", "ftp://", "ftps://")):
                # requests will do the rest...
                return cls(request(data))
            # Plain JSON string.
            return cls(json.loads(data, object_pairs_hook=OrderedDict))
        # Anything else is assumed to be a readable file-like object.
        return cls(json.load(data, object_pairs_hook=OrderedDict))

    def write(self, output='jsonstat'):
        """Write data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'. Default to
                            'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        if output == 'dataframe':
            # from_json_stat returns a list; a Dataset decodes to one frame.
            return from_json_stat(self)[0]
        raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Convert a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            int: index of the category in the dimension.
        """
        category = self.get('dimension', {}).get(name, {}).get('category', {})
        if 'index' not in category:
            # Dimensions without an explicit index hold a single category.
            return 0
        index = category['index']
        if isinstance(index, list):
            # List form: the position in the list is the index.
            return index.index(value)
        # Mapping form: {category_id: index}.
        return index[value]

    def get_dimension_indices(self, query):
        """Convert a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query(list): dimension/category list of dicts.

        Returns:
            indices(list): list of dimensions' indices.
        """
        # JSON-stat 2.0 keeps 'id' at top level; 1.x nests it in 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for dim_id in ids:
            # First query entry mentioning this dimension wins.
            requested = [entry.get(dim_id) for entry in query
                         if dim_id in entry][0]
            indices.append(self.get_dimension_index(dim_id, requested))
        return indices

    def get_value_index(self, indices):
        """Convert a list of dimensions' indices into a numeric value index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        num = 0
        mult = 1
        # Row-major linearization: each dimension's index is weighted by the
        # running product of the sizes of all later (faster-varying)
        # dimensions, accumulated from the last dimension backwards.
        for pos in range(ndims):
            if pos > 0:
                mult *= size[ndims - pos]
            num += mult * indices[ndims - pos - 1]
        return num

    def get_value_by_index(self, index):
        """Convert a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            The data value stored at that position.
        """
        return self['value'][index]

    def get_value(self, query):
        """Convert a dimension/category list of dicts into a data value
        in three steps.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.
        """
        return self.get_value_by_index(
            self.get_value_index(self.get_dimension_indices(query)))
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    An ordered mapping holding one deserialized JSON-stat 'dimension'
    response, convertible to and from a pandas id/label dataframe.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension from an id/label dataframe:
            # the label column is whichever column is neither 'id' nor
            # 'index'.
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            # Remote resource: fetch and deserialize.
            return cls(request(data))
        elif isinstance(data,basestring):
            # Plain JSON string.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is assumed to be a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # 'dimension' mode tells get_dim_label to read category data
            # from this object directly.
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    The collection is stored as the underlying OrderedDict, so the raw
    JSON-stat structure stays accessible through normal mapping access.
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://",
                                     "ftps://")):
            # URL-looking string: fetch and deserialize it.
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string: assume serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: treat data as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending
            on the 'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(self)
        elif output == 'dataframe_list':
            # Recursively expand nested collections into dataframes.
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Gets ith element of a collection in an object of the
        corresponding class.

        NOTE(review): this overrides dict.get() with different semantics
        (positional item lookup instead of key lookup with default).

        Args:
            element(int): index of the desired item in the collection.

        Returns:
            A Dataset, Collection or Dimension object read from the
            item's 'href', according to its declared class.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | to_json_stat | python | def to_json_stat(input_df, value='value', output='list', version='1.3'):
warnings.warn(
"Shouldn't use this function anymore! Now use write() methods of"
"Dataset, Collection or Dimension.",
DeprecationWarning
)
data = []
if output == 'list':
result = []
elif output == 'dict':
result = OrderedDict({})
if isinstance(input_df, pd.DataFrame):
data.append(input_df)
else:
data = input_df
for row, dataframe in enumerate(data):
dims = data[row].filter([item for item in data[row].columns.values
if item not in value])
if len(dims.columns.values) != len(set(dims.columns.values)):
raise ValueError('Non-value columns must constitute a unique ID')
dim_names = list(dims)
categories = [{to_int(i):
{"label": to_str(i),
"category":
{"index":
OrderedDict([(to_str(j), to_int(k))
for k, j in enumerate(
uniquify(dims[i]))]),
"label":
OrderedDict([(to_str(j), to_str(j))
for k, j in enumerate(
uniquify(dims[i]))])}}}
for i in dims.columns.values]
if float(version) >= 2.0:
dataset = {"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}
dataset["version"] = version
dataset["class"] = "dataset"
for category in categories:
dataset["dimension"].update(category)
dataset.update({"id": dim_names})
dataset.update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dimension"].update(category)
else:
dataset = {"dataset" +
str(row + 1):
{"dimension": OrderedDict(),
value: [None if np.isnan(x) else x
for x in dataframe[value].values]}}
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
dataset["dataset" + str(row + 1)][
"dimension"].update({"id": dim_names})
dataset["dataset" + str(row + 1)][
"dimension"].update({"size": [len(dims[i].unique())
for i in dims.columns.values]})
for category in categories:
dataset["dataset" + str(row + 1)][
"dimension"].update(category)
if output == 'list':
result.append(dataset)
elif output == 'dict':
result.update(dataset)
else:
result = None
return json.dumps(result, cls=NumpyEncoder) | Encode pandas.DataFrame object into JSON-stat format. The DataFrames
must have exactly one value column.
Args:
df(pandas.DataFrame): pandas data frame (or list of data frames) to
encode.
value (string, optional): name of the value column. Defaults to 'value'.
output(string): accepts two values: 'list' or 'dict'. Produce list of\
dicts or dict of dicts as output.
version(string): desired json-stat version. 2.0 is preferred now.\
Apart from this, only older 1.3 format is accepted,
which is the default parameter in order to preserve
backwards compatibility.
Returns:
output(string): String with JSON-stat object. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L440-L531 | null | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that maps NumPy scalar and array types onto their
    native Python equivalents."""

    def default(self, obj):
        # Integers and floats become plain int/float; arrays become lists.
        if isinstance(obj, (np.integer, np.int64)):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base encoder (raises TypeError).
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Return *variable* converted to int when possible, else unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
        content of variable.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
def to_str(variable):
    """Return the string form of *variable* when it parses as an integer,
    else return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
        content of variable.
    """
    try:
        int(variable)
    except ValueError:
        return variable
    return str(variable)
def check_version_2(dataset):
    """Checks if json-stat version attribute exists and is equal or
    greater than 2.0 for a given dataset.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized by json.load() or json.loads().

    Returns:
        bool: True if version exists and is equal or greater than 2.0,
        False otherwise. For datasets without the version attribute,
        always return False.
    """
    version = dataset.get('version')
    if not version:
        return False
    return float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Unnest collection structure extracting all its datasets and
    converting them to Pandas Dataframes.

    Args:
        collection (OrderedDict): data in JSON-stat format, previously
            deserialized by json.load() or json.loads().
        df_list (list): list variable which will contain the converted
            datasets.

    Returns:
        Nothing.
    """
    for entry in collection['link']['item']:
        kind = entry['class']
        if kind == 'dataset':
            df_list.append(Dataset.read(entry['href']).write('dataframe'))
        elif kind == 'collection':
            # Recurse into nested collections.
            unnest_collection(request(entry['href']), df_list)
def check_input(naming):
    """Check and validate input params.

    Args:
        naming (string): a string containing the naming type (label or id).

    Returns:
        Nothing

    Raises:
        ValueError: if the parameter is not in the allowed list.
    """
    if naming not in ('label', 'id'):
        raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Get dimensions from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming. Possible values: 'label' or 'id'.

    Returns:
        dimensions (list): list of pandas data frames with dimension
            category data.
        dim_names (list): list of strings with dimension names.
    """
    dimensions = []
    dim_names = []
    # JSON-stat 2.0 keeps 'id' at the dataset root; 1.x nests it under
    # 'dimension'.
    if check_version_2(js_dict):
        dimension_dict = js_dict
    else:
        dimension_dict = js_dict['dimension']
    for dim in dimension_dict['id']:
        dim_name = js_dict['dimension'][dim]['label']
        if not dim_name:
            # Fall back to the dimension id when no label is provided.
            dim_name = dim
        if naming == 'label':
            dim_label = get_dim_label(js_dict, dim)
            dimensions.append(dim_label)
            dim_names.append(dim_name)
        else:
            dim_index = get_dim_index(js_dict, dim)
            dimensions.append(dim_index)
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string): 'dataset' when js_dict is a whole dataset and the
            dimension must be looked up inside it; 'dimension' when
            js_dict is the dimension object itself.

    Returns:
        dim_label(pandas.DataFrame): DataFrame with label-based dimension
            data, sorted by category index.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No explicit labels: reuse the category ids as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # The JSON-stat index may be an ordered list of ids or an
        # id-to-position mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values(by=...) is the supported equivalent.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based
            dimension data, sorted by category index.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # The JSON-stat index may be an ordered list of ids or an
        # id-to-position mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
        # FIX: DataFrame.sort_index(by=...) was removed from pandas;
        # sort_values(by=...) is the supported equivalent.
        dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    JSON-stat allows either a dense array of values or a sparse
    index-to-value mapping; the sparse form is expanded to a dense list.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        values (list): list of dataset values.
    """
    values = js_dict[value]
    if type(values) is list:
        # A JSON-stat value array is already dense; return it as-is.
        # FIX: the original guard `type(values[0]) is not dict or tuple`
        # was always true (`or tuple` is truthy), so every list took this
        # path anyway — the intent is preserved, the dead test removed.
        return values
    # Sparse form: a mapping from (string) value index to value.
    values = {int(key): val for (key, val) in values.items()}
    # FIX: coerce the numpy product to a plain int — multiplying a
    # np.int64 by a list broadcasts elementwise instead of replicating.
    if js_dict.get('size'):
        max_val = int(np.prod(np.array(js_dict['size'])))
    else:
        max_val = int(np.prod(np.array(js_dict['dimension']['size'])))
    vals = max_val * [None]
    for (key, val) in values.items():
        vals[key] = val
    return vals
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels
            generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used
            in the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row,
            except for the value column. Default is empty, it's used in
            the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for value
        column.
    """
    check_input(naming)
    if i == 0 or record is None:
        record = []
    # Depth-first walk over the cartesian product of all dimension
    # categories. `record` is a single list shared by every recursive
    # call; callers must consume each yielded row before advancing the
    # generator, since it is mutated in place.
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        if len(record) == len(dimensions):
            # One complete row (one entry per dimension) is ready.
            yield record
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        if len(record) == i + 1:
            # Backtrack before trying the next category of this dimension.
            record.pop()
def uniquify(seq):
    """Return unique values in a list in the original order. See:
    http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def generate_df(js_dict, naming, value="value"):
    """Decode JSON-stat dict into pandas.DataFrame object. Helper method
    that should be called inside from_json_stat().

    Args:
        js_dict(OrderedDict): OrderedDict with data in JSON-stat format,
            previously deserialized into a python object by json.load()
            or json.loads(), for example.
        naming(string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        output(DataFrame): pandas.DataFrame with converted data.
    """
    # FIX: dropped the dead `values = []` assignment that was
    # immediately overwritten by get_values().
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # One dataframe row per combination of dimension categories, with the
    # corresponding data value appended as the last column.
    output = pd.DataFrame([category + [values[i]]
                           for i, category in
                           enumerate(get_df_row(dimensions, naming))])
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame object.

    Args:
        datasets(OrderedDict, list): data in JSON-stat format, previously
            deserialized to a python object by json.load() or
            json.loads(), for example. Both List and OrderedDict are
            accepted as inputs.
        naming(string, optional): dimension naming. Possible values:
            'label' or 'id'. Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        results(list): list of pandas.DataFrame with imported data.
    """
    # Deprecated entry point kept for backwards compatibility.
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # A 1.x bundle serialized as a list of single-dataset dicts.
        for idx, element in enumerate(datasets):
            for dataset in element:
                js_dict = datasets[idx][dataset]
                results.append(generate_df(js_dict, naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            if datasets['class'] == 'dataset':
                # JSON-stat 2.0: the mapping itself is the dataset.
                js_dict = datasets
                results.append(generate_df(js_dict, naming, value))
        else:  # 1.00 bundle type
            # Mapping of dataset name -> dataset dict.
            for dataset in datasets:
                js_dict = datasets[dataset]
                results.append(generate_df(js_dict, naming, value))
    return results
def request(path):
    """Send a request to a given URL accepting JSON format and return a
    deserialized Python object.

    Args:
        path (str): The URI to be requested.

    Returns:
        response: Deserialized JSON Python object.

    Raises:
        HTTPError: the HTTP error returned by the requested server.
        InvalidURL: an invalid URL has been requested.
        Exception: generic exception.
    """
    headers = {'Accept': 'application/json'}
    try:
        reply = requests.get(path, headers=headers)
        reply.raise_for_status()
    except requests.exceptions.HTTPError as exception:
        # Log the failing function name, status and target URL.
        LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
                     str(exception.response.status_code) + ' ' +
                     str(exception.response.reason) + ' ' + str(path))
        raise
    except requests.exceptions.InvalidURL as exception:
        LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
        raise
    except Exception:
        import traceback
        LOGGER.error('Generic exception: ' + traceback.format_exc())
        raise
    else:
        return reply.json()
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    The dataset is stored as the underlying OrderedDict, so the raw
    JSON-stat structure stays accessible through normal mapping access.
    """

    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat serializer to normalize
            # the dataframe into a JSON-stat 2.0 structure.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
              and data.startswith(("http://", "https://",
                                   "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # Plain string: assume serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: treat data as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'.
                Default to 'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            # NumpyEncoder handles numpy scalars/arrays left by pandas.
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")

    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into
        the numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            ndx[value](int): index of the category in the dimension.
        """
        # Dimensions without an explicit category index hold a single
        # category at position 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        # The JSON-stat index may be an ordered list of ids or an
        # id-to-position mapping.
        if isinstance(ndx, list):
            return ndx.index(value)
        else:
            return ndx[value]

    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query(list): dimension/category list of dicts.

        Returns:
            indices(list): list of dimensions' indices.
        """
        # JSON-stat 2.0 keeps 'id' at the root; 1.x nests it under
        # 'dimension'.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            # Pick the first query dict that mentions this dimension.
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices

    def get_value_index(self, indices):
        """Converts a list of dimensions' indices into a numeric value
        index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        ndims = len(size)
        mult = 1
        num = 0
        # Row-major linearization (last dimension varies fastest),
        # accumulating from the last dimension back to the first.
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num

    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            self['value'][index](float): Numeric data value.
        """
        return self['value'][index]

    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value
        in three steps.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    The dimension is stored as the underlying OrderedDict, so the raw
    JSON-stat structure stays accessible through normal mapping access.
    """

    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                  an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Build a JSON-stat 2.0 dimension from a dataframe expected to
            # carry an 'id' column plus exactly one label column
            # (the [label] destructuring raises if that is not the case).
            output = OrderedDict({})
            output['version'] = '2.0'
            output['class'] = 'dimension'
            [label] = [x for x in list(data.columns.values) if
                       x not in ['id', 'index']]
            output['label'] = label
            output['category'] = OrderedDict({})
            output['category']['index'] = data.id.tolist()
            output['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(output)
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring) and data.startswith(("http://",
                                                               "https://",
                                                               "ftp://",
                                                               "ftps://")):
            # URL-looking string: fetch and deserialize it.
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string: assume serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: treat data as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.
        """
        if output == 'jsonstat':
            # NumpyEncoder handles numpy scalars/arrays left by pandas.
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            return get_dim_label(self, self['label'], 'dimension')
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    The collection is stored as the underlying OrderedDict, so the raw
    JSON-stat structure stays accessible through normal mapping access.
    """

    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)

    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                  or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://", "ftp://",
                                     "ftps://")):
            # URL-looking string: fetch and deserialize it.
            return cls(request(data))
        elif isinstance(data, basestring):
            # Any other string: assume serialized JSON.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Fallback: treat data as a readable file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise

    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending
            on the 'output' parameter.
        """
        if output == 'jsonstat':
            return json.dumps(self)
        elif output == 'dataframe_list':
            # Recursively expand nested collections into dataframes.
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")

    def get(self, element):
        """Gets ith element of a collection in an object of the
        corresponding class.

        NOTE(review): this overrides dict.get() with different semantics
        (positional item lookup instead of key lookup with default).

        Args:
            element(int): index of the desired item in the collection.

        Returns:
            A Dataset, Collection or Dimension object read from the
            item's 'href', according to its declared class.
        """
        if self['link']['item'][element]['class'] == 'dataset':
            return Dataset.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'collection':
            return Collection.read(self['link']['item'][element]['href'])
        elif self['link']['item'][element]['class'] == 'dimension':
            return Dimension.read(self['link']['item'][element]['href'])
        else:
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension'")
|
predicador37/pyjstat | pyjstat/pyjstat.py | request | python | def request(path):
headers = {'Accept': 'application/json'}
try:
requested_object = requests.get(path, headers=headers)
requested_object.raise_for_status()
except requests.exceptions.HTTPError as exception:
LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +
str(exception.response.status_code) + ' ' +
str(exception.response.reason) + ' ' + str(path))
raise
except requests.exceptions.InvalidURL as exception:
LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))
raise
except Exception:
import traceback
LOGGER.error('Generic exception: ' + traceback.format_exc())
raise
else:
response = requested_object.json()
return response | Send a request to a given URL accepting JSON format and return a \
deserialized Python object.
Args:
path (str): The URI to be requested.
Returns:
response: Deserialized JSON Python object.
Raises:
HTTPError: the HTTP error returned by the requested server.
InvalidURL: an invalid URL has been requested.
Exception: generic exception. | train | https://github.com/predicador37/pyjstat/blob/45d671835a99eb573e1058cd43ce93ac4f85f9fa/pyjstat/pyjstat.py#L534-L568 | null | # -*- coding: utf-8 -*-
"""pyjstat is a python module for JSON-stat formatted data manipulation.
This module allows reading and writing JSON-stat [1]_ format with python,
using data frame structures provided by the widely accepted
pandas library [2]_. The JSON-stat format is a simple lightweight JSON format
for data dissemination. Pyjstat is inspired in rjstat [3]_, a library to read
and write JSON-stat with R, by ajschumacher.
pyjstat is written and maintained by `Miguel Expósito Martín
<https://twitter.com/predicador37>`_ and is distributed under the Apache 2.0
License (see LICENSE file).
.. [1] http://json-stat.org/ for JSON-stat information
.. [2] http://pandas.pydata.org for Python Data Analysis Library information
.. [3] https://github.com/ajschumacher/rjstat for rjstat library information
Example:
Importing a JSON-stat file into a pandas data frame can be done as follows::
import urllib2
import json
import pyjstat
results = pyjstat.from_json_stat(json.load(urllib2.urlopen(
'http://json-stat.org/samples/oecd-canada.json')))
print results
"""
import json
import pandas as pd
import numpy as np
from collections import OrderedDict
import requests
import logging
import inspect
import warnings
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
try:
basestring
except NameError:
basestring = str
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that maps NumPy scalar and array types onto their
    native Python equivalents."""

    def default(self, obj):
        # Integers and floats become plain int/float; arrays become lists.
        if isinstance(obj, (np.integer, np.int64)):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base encoder (raises TypeError).
        return super(NumpyEncoder, self).default(obj)
def to_int(variable):
    """Return *variable* converted to int when possible, else unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
        content of variable.
    """
    try:
        converted = int(variable)
    except ValueError:
        return variable
    return converted
def to_str(variable):
    """Return the string form of *variable* when it parses as an integer,
    else return it unchanged.

    Args:
        variable (string): a string containing a real string or an integer.

    Returns:
        variable(int, string): an integer or a string, depending on the
        content of variable.
    """
    try:
        int(variable)
    except ValueError:
        return variable
    return str(variable)
def check_version_2(dataset):
    """Checks if json-stat version attribute exists and is equal or
    greater than 2.0 for a given dataset.

    Args:
        dataset (OrderedDict): data in JSON-stat format, previously
            deserialized by json.load() or json.loads().

    Returns:
        bool: True if version exists and is equal or greater than 2.0,
        False otherwise. For datasets without the version attribute,
        always return False.
    """
    version = dataset.get('version')
    if not version:
        return False
    return float(version) >= 2.0
def unnest_collection(collection, df_list):
    """Unnest collection structure extracting all its datasets and
    converting them to Pandas Dataframes.

    Args:
        collection (OrderedDict): data in JSON-stat format, previously
            deserialized by json.load() or json.loads().
        df_list (list): list variable which will contain the converted
            datasets.

    Returns:
        Nothing.
    """
    for entry in collection['link']['item']:
        kind = entry['class']
        if kind == 'dataset':
            df_list.append(Dataset.read(entry['href']).write('dataframe'))
        elif kind == 'collection':
            # Recurse into nested collections.
            unnest_collection(request(entry['href']), df_list)
def check_input(naming):
    """Check and validate input params.

    Args:
        naming (string): a string containing the naming type (label or id).

    Returns:
        Nothing

    Raises:
        ValueError: if the parameter is not in the allowed list.
    """
    if naming not in ('label', 'id'):
        raise ValueError('naming must be "label" or "id"')
def get_dimensions(js_dict, naming):
    """Get dimensions from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        naming (string): dimension naming. Possible values: 'label' or 'id'.

    Returns:
        dimensions (list): list of pandas data frames with dimension
            category data.
        dim_names (list): list of strings with dimension names.
    """
    dimensions = []
    dim_names = []
    # JSON-stat 2.0 keeps 'id' at the dataset root; 1.x nests it under
    # 'dimension'.
    if check_version_2(js_dict):
        dimension_dict = js_dict
    else:
        dimension_dict = js_dict['dimension']
    for dim in dimension_dict['id']:
        dim_name = js_dict['dimension'][dim]['label']
        if not dim_name:
            # Fall back to the dimension id when no label is provided.
            dim_name = dim
        if naming == 'label':
            dim_label = get_dim_label(js_dict, dim)
            dimensions.append(dim_label)
            dim_names.append(dim_name)
        else:
            dim_index = get_dim_index(js_dict, dim)
            dimensions.append(dim_index)
            dim_names.append(dim)
    return dimensions, dim_names
def get_dim_label(js_dict, dim, input="dataset"):
    """Get label from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.
        input (string): 'dataset' when js_dict is a whole dataset and the
            dimension must be looked up inside it; 'dimension' when
            js_dict is the dimension object itself.

    Returns:
        dim_label(pandas.DataFrame): DataFrame with label-based dimension
            data, sorted by category index.
    """
    if input == 'dataset':
        input = js_dict['dimension'][dim]
        label_col = 'label'
    elif input == 'dimension':
        label_col = js_dict['label']
        input = js_dict
    else:
        raise ValueError
    try:
        dim_label = input['category']['label']
    except KeyError:
        # No explicit labels: reuse the category ids as labels.
        dim_index = get_dim_index(js_dict, dim)
        dim_label = pd.concat([dim_index['id'],
                               dim_index['id']],
                              axis=1)
        dim_label.columns = ['id', 'label']
    else:
        dim_label = pd.DataFrame(list(zip(dim_label.keys(),
                                          dim_label.values())),
                                 index=dim_label.keys(),
                                 columns=['id', label_col])
    # index must be added to dim label so that it can be sorted
    try:
        dim_index = input['category']['index']
    except KeyError:
        # Single-category dimension without an explicit index.
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # The JSON-stat index may be an ordered list of ids or an
        # id-to-position mapping.
        if type(dim_index) is list:
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # FIX: DataFrame.sort_index(by=...) was removed from pandas;
    # sort_values(by=...) is the supported equivalent.
    dim_label = pd.merge(dim_label, dim_index, on='id').sort_values(by='index')
    return dim_label
def get_dim_index(js_dict, dim):
    """Get index from a given dimension.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        dim (string): dimension name obtained from JSON file.

    Returns:
        dim_index (pandas.DataFrame): DataFrame with index-based dimension
            data ('id' and 'index' columns), sorted by index.
    """
    try:
        dim_index = js_dict['dimension'][dim]['category']['index']
    except KeyError:
        # Single-category dimension: synthesize a one-row index frame
        # from the first (only) id of the label frame.
        dim_label = get_dim_label(js_dict, dim)
        dim_index = pd.DataFrame(list(zip([dim_label['id'][0]], [0])),
                                 index=[0],
                                 columns=['id', 'index'])
    else:
        # The index may come as a list of ids (position == index) or as
        # an id -> position mapping.
        if isinstance(dim_index, list):
            dim_index = pd.DataFrame(list(zip(dim_index,
                                              range(0, len(dim_index)))),
                                     index=dim_index, columns=['id', 'index'])
        else:
            dim_index = pd.DataFrame(list(zip(dim_index.keys(),
                                              dim_index.values())),
                                     index=dim_index.keys(),
                                     columns=['id', 'index'])
    # sort_index(by=...) was removed from pandas; sort_values is the
    # supported equivalent.
    dim_index = dim_index.sort_values(by='index')
    return dim_index
def get_values(js_dict, value='value'):
    """Get values from input data.

    Args:
        js_dict (dict): dictionary containing dataset data and metadata.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        values (list): list of dataset values; missing cells are None.
    """
    values = js_dict[value]
    if isinstance(values, list):
        # NOTE(fix): the original condition was
        # "type(values[0]) is not dict or tuple", which is always True
        # due to operator precedence and also raised IndexError on an
        # empty list. A plain list of cell values is returned as-is.
        if not values or not isinstance(values[0], (dict, tuple)):
            return values
        # A sparse list of (index, value) pairs: normalize to a dict so
        # the dense expansion below applies.
        values = dict(values)
    # Sparse dict keyed by flattened (row-major) cell index: expand to a
    # dense list, filling missing cells with None.
    values = {int(key): val for (key, val) in values.items()}
    if js_dict.get('size'):
        max_val = np.prod(np.array(js_dict['size']))
    else:
        max_val = np.prod(np.array(js_dict['dimension']['size']))
    vals = int(max_val) * [None]
    for (key, val) in values.items():
        vals[key] = val
    return vals
def get_df_row(dimensions, naming='label', i=0, record=None):
    """Generate row dimension values for a pandas dataframe.

    Args:
        dimensions (list): list of pandas dataframes with dimension labels
            generated by get_dim_label or get_dim_index methods.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'.
        i (int): dimension list iteration index. Default is 0, it's used
            in the recursive calls to the method.
        record (list): list of values representing a pandas dataframe row,
            except for the value column. Default is empty, it's used in
            the recursive calls to the method.

    Yields:
        list: list with pandas dataframe column values except for the
            value column. NOTE: every yield hands out the SAME list
            object ('record' is shared mutable state across the recursive
            calls), so callers must copy it before the next iteration
            (generate_df does this via 'category + [value]').
    """
    # Validate the naming argument before doing any work.
    check_input(naming)
    if i == 0 or record is None:
        record = []
    # Depth-first walk over the cartesian product of all the dimensions'
    # categories, building each row as a prefix in 'record'.
    for dimension in dimensions[i][naming]:
        record.append(dimension)
        # A complete row has one entry per dimension: emit it.
        if len(record) == len(dimensions):
            yield record
        # Recurse into the next dimension while this prefix is in place.
        if i + 1 < len(dimensions):
            for row in get_df_row(dimensions, naming, i + 1, record):
                yield row
        # Backtrack: drop this level's entry before trying its next value.
        if len(record) == i + 1:
            record.pop()
def uniquify(seq):
    """Return the unique values of a list, keeping first-occurrence order.

    See: http://www.peterbe.com/plog/uniqifiers-benchmark

    Args:
        seq (list): original list.

    Returns:
        list: list without duplicates preserving original order.
    """
    seen = set()
    unique = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def generate_df(js_dict, naming, value="value"):
    """Decode a JSON-stat dict into a pandas.DataFrame object. Helper
    method that should be called from from_json_stat().

    Args:
        js_dict (OrderedDict): OrderedDict with data in JSON-stat format,
            previously deserialized into a python object by json.load()
            or json.loads(), for example.
        naming (string): dimension naming. Possible values: 'label' or 'id'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        output (DataFrame): pandas.DataFrame with converted data.
    """
    # (fix) removed a dead "values = []" that was immediately overwritten.
    dimensions, dim_names = get_dimensions(js_dict, naming)
    values = get_values(js_dict, value=value)
    # Each generated row is one category combination; append its value.
    # get_df_row re-yields the same list object, so the concatenation
    # below also serves as the required copy.
    output = pd.DataFrame([category + [values[i]]
                           for i, category in
                           enumerate(get_df_row(dimensions, naming))])
    output.columns = dim_names + [value]
    output.index = range(0, len(values))
    return output
def from_json_stat(datasets, naming='label', value='value'):
    """Decode JSON-stat formatted data into pandas.DataFrame objects.

    Deprecated: use the read() methods of Dataset, Collection or
    Dimension instead.

    Args:
        datasets (OrderedDict, list): data in JSON-stat format, previously
            deserialized to a python object by json.load() or
            json.loads(), for example. Both List and OrderedDict are
            accepted as inputs.
        naming (string, optional): dimension naming. Possible values:
            'label' or 'id'. Defaults to 'label'.
        value (string, optional): name of the value column. Defaults to
            'value'.

    Returns:
        results (list): list of pandas.DataFrame with imported data.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use read() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    check_input(naming)
    results = []
    if type(datasets) is list:
        # json-stat 1.x: a list of bundles, each mapping names to datasets.
        for element in datasets:
            for name in element:
                results.append(generate_df(element[name], naming, value))
    elif isinstance(datasets, OrderedDict) or type(datasets) is dict or \
            isinstance(datasets, Dataset):
        if 'class' in datasets:
            # json-stat 2.0: a single dataset carries its own class tag.
            if datasets['class'] == 'dataset':
                results.append(generate_df(datasets, naming, value))
        else:
            # 1.00 bundle type: a mapping of dataset names to datasets.
            for name in datasets:
                results.append(generate_df(datasets[name], naming, value))
    return results
def to_json_stat(input_df, value='value', output='list', version='1.3'):
    """Encode pandas.DataFrame object into JSON-stat format. The DataFrames
    must have exactly one value column.

    Args:
        input_df (pandas.DataFrame): pandas data frame (or list of data
            frames) to encode.
        value (string, optional): name of the value column. Defaults to
            'value'.
        output (string): accepts two values: 'list' or 'dict'. Produce
            list of dicts or dict of dicts as output.
        version (string): desired json-stat version. 2.0 is preferred now.
            Apart from this, only the older 1.3 format is accepted, which
            is the default parameter in order to preserve backwards
            compatibility.

    Returns:
        output (string): String with JSON-stat object.

    Raises:
        ValueError: if the non-value columns do not form a unique ID.
    """
    warnings.warn(
        "Shouldn't use this function anymore! Now use write() methods of"
        "Dataset, Collection or Dimension.",
        DeprecationWarning
    )
    data = []
    if output == 'list':
        result = []
    elif output == 'dict':
        result = OrderedDict({})
    if isinstance(input_df, pd.DataFrame):
        data.append(input_df)
    else:
        data = input_df
    for row, dataframe in enumerate(data):
        # All non-value columns are dimensions and must uniquely identify
        # each row. NOTE(fix): the original used "item not in value",
        # a substring test that could wrongly drop columns whose names
        # are substrings of the value column name.
        dims = data[row].filter([item for item in data[row].columns.values
                                 if item != value])
        if len(dims.columns.values) != len(set(dims.columns.values)):
            raise ValueError('Non-value columns must constitute a unique ID')
        dim_names = list(dims)
        # One {id: {label, category: {index, label}}} mapping per
        # dimension, preserving first-occurrence order of the categories.
        categories = [{to_int(i):
                       {"label": to_str(i),
                        "category":
                        {"index":
                         OrderedDict([(to_str(j), to_int(k))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))]),
                         "label":
                         OrderedDict([(to_str(j), to_str(j))
                                      for k, j in enumerate(
                                          uniquify(dims[i]))])}}}
                      for i in dims.columns.values]
        # NOTE(fix): the original updated "dimension" with the same
        # categories twice in each branch; the redundant second pass was
        # a no-op and has been removed.
        if float(version) >= 2.0:
            # json-stat 2.0: id/size live at the dataset's top level.
            dataset = {"dimension": OrderedDict(),
                       value: [None if np.isnan(x) else x
                               for x in dataframe[value].values]}
            dataset["version"] = version
            dataset["class"] = "dataset"
            for category in categories:
                dataset["dimension"].update(category)
            dataset.update({"id": dim_names})
            dataset.update({"size": [len(dims[i].unique())
                                     for i in dims.columns.values]})
        else:
            # json-stat 1.3: everything nests under "datasetN", with
            # id/size inside "dimension".
            dataset = {"dataset" +
                       str(row + 1):
                       {"dimension": OrderedDict(),
                        value: [None if np.isnan(x) else x
                                for x in dataframe[value].values]}}
            for category in categories:
                dataset["dataset" + str(row + 1)][
                    "dimension"].update(category)
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"id": dim_names})
            dataset["dataset" + str(row + 1)][
                "dimension"].update({"size": [len(dims[i].unique())
                                              for i in dims.columns.values]})
        if output == 'list':
            result.append(dataset)
        elif output == 'dict':
            result.update(dataset)
        else:
            result = None
    return json.dumps(result, cls=NumpyEncoder)
class Dataset(OrderedDict):
    """A class representing a JSONstat dataset.

    The deserialized JSON-stat structure is stored directly in the
    underlying OrderedDict; the methods below convert between formats
    and resolve dimension/category queries into cell values.
    """
    def __init__(self, *args, **kwargs):
        super(Dataset, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file or
        OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON file, a JSON string,
                an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dataset populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # Round-trip through the JSON-stat 2.0 encoder so the stored
            # structure is plain JSON-stat whatever the input was.
            return cls((json.loads(
                to_json_stat(data, output='dict', version='2.0'),
                object_pairs_hook=OrderedDict)))
        elif isinstance(data, OrderedDict):
            return cls(data)
        elif (isinstance(data, basestring)
                and data.startswith(("http://", "https://",
                                     "ftp://", "ftps://"))):
            # requests will do the rest...
            return cls(request(data))
        elif isinstance(data, basestring):
            # A non-URL string is assumed to be raw JSON text.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as an open file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
    def write(self, output='jsonstat'):
        """Writes data from a Dataset object to JSONstat or Pandas Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'. Default to
                'jsonstat'.

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.

        Raises:
            ValueError: for any other value of 'output'.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe':
            # from_json_stat always returns a list; a Dataset holds
            # exactly one dataset, hence the [0].
            return from_json_stat(self)[0]
        else:
            raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
    def get_dimension_index(self, name, value):
        """Converts a dimension ID string and a category ID string into the
        numeric index of that category in that dimension.

        Args:
            name(string): ID string of the dimension.
            value(string): ID string of the category.

        Returns:
            ndx[value](int): index of the category in the dimension.
        """
        # A dimension without an explicit category index has a single
        # category, which by definition sits at position 0.
        if 'index' not in self.get('dimension', {}). \
                get(name, {}).get('category', {}):
            return 0
        ndx = self['dimension'][name]['category']['index']
        # The index may be serialized as a list of ids (position is the
        # index) or as an id -> position mapping.
        if isinstance(ndx, list):
            return ndx.index(value)
        else:
            return ndx[value]
    def get_dimension_indices(self, query):
        """Converts a dimension/category list of dicts into a list of
        dimensions' indices.

        Args:
            query(list): list of single-entry dicts, each mapping a
                dimension id to the requested category id.

        Returns:
            indices(list): list of dimensions' indices, in dataset
                dimension order.
        """
        # 'id' lives at the dataset's top level in json-stat 2.0, under
        # 'dimension' in older versions.
        ids = self['id'] if self.get('id') else self['dimension']['id']
        indices = []
        for idx, id in enumerate(ids):
            indices.append(self.get_dimension_index(id,
                                                    [d.get(id) for d in query
                                                     if id in d][0]))
        return indices
    def get_value_index(self, indices):
        """Converts a list of dimensions' indices into a numeric value index.

        Args:
            indices(list): list of dimension's indices.

        Returns:
            num(int): numeric value index into the flat value array.
        """
        size = self['size'] if self.get('size') else self['dimension']['size']
        # Values are stored as a flat row-major array: walk the dimensions
        # from last to first, accumulating each dimension's stride in
        # 'mult' (1 for the last dimension, then the running product of
        # the trailing sizes).
        ndims = len(size)
        mult = 1
        num = 0
        for idx, dim in enumerate(size):
            mult *= size[ndims - idx] if (idx > 0) else 1
            num += mult * indices[ndims - idx - 1]
        return num
    def get_value_by_index(self, index):
        """Converts a numeric value index into its data value.

        Args:
            index(int): numeric value index.

        Returns:
            self['value'][index](float): Numeric data value.
        """
        return self['value'][index]
    def get_value(self, query):
        """Converts a dimension/category list of dicts into a data value
        in three steps: query -> per-dimension indices -> flat index ->
        value.

        Args:
            query(list): list of dicts with the desired query.

        Returns:
            value(float): numeric data value.
        """
        indices = self.get_dimension_indices(query)
        index = self.get_value_index(indices)
        value = self.get_value_by_index(index)
        return value
class Dimension(OrderedDict):
    """A class representing a JSONstat dimension.

    Stores the deserialized dimension structure in the underlying
    OrderedDict and converts between JSONstat and pandas forms.
    """
    def __init__(self, *args, **kwargs):
        super(Dimension, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL, Dataframe, JSON string, JSON file
        or OrderedDict.

        Args:
            data: can be a Pandas Dataframe, a JSON string, a JSON file,
                an OrderedDict or a URL pointing to a JSONstat file.

        Returns:
            An object of class Dimension populated with data.
        """
        if isinstance(data, pd.DataFrame):
            # A dimension frame carries an 'id' column, an 'index' column
            # and exactly one extra column whose name is the dimension's
            # label (unpacking raises if that is not the case).
            [label] = [col for col in list(data.columns.values)
                       if col not in ['id', 'index']]
            built = OrderedDict({})
            built['version'] = '2.0'
            built['class'] = 'dimension'
            built['label'] = label
            built['category'] = OrderedDict({})
            built['category']['index'] = data.id.tolist()
            built['category']['label'] = OrderedDict(
                zip(data.id.values, data[label].values))
            return cls(built)
        if isinstance(data, OrderedDict):
            return cls(data)
        if isinstance(data, basestring):
            if data.startswith(("http://", "https://",
                                "ftp://", "ftps://")):
                # requests will do the rest...
                return cls(request(data))
            # A non-URL string is raw JSON text.
            return cls(json.loads(data, object_pairs_hook=OrderedDict))
        # Anything else is treated as an open file-like object.
        return cls(json.load(data, object_pairs_hook=OrderedDict))
    def write(self, output='jsonstat'):
        """Writes data from a Dimension object to JSONstat or Pandas
        Dataframe.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe'

        Returns:
            Serialized JSONstat or a Pandas Dataframe, depending on the
            'output' parameter.

        Raises:
            ValueError: for any other value of 'output'.
        """
        if output == 'jsonstat':
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        if output == 'dataframe':
            return get_dim_label(self, self['label'], 'dimension')
        raise ValueError("Allowed arguments are 'jsonstat' or 'dataframe'")
class Collection(OrderedDict):
    """A class representing a JSONstat collection.

    A Collection is an OrderedDict of links to datasets, dimensions or
    nested collections, as defined by the JSONstat specification.
    """
    def __init__(self, *args, **kwargs):
        super(Collection, self).__init__(*args, **kwargs)
    @classmethod
    def read(cls, data):
        """Reads data from URL or OrderedDict.

        Args:
            data: can be a URL pointing to a JSONstat file, a JSON string
                or an OrderedDict.

        Returns:
            An object of class Collection populated with data.
        """
        if isinstance(data, OrderedDict):
            return cls(data)
        elif isinstance(data, basestring)\
                and data.startswith(("http://", "https://",
                                     "ftp://", "ftps://")):
            return cls(request(data))
        elif isinstance(data, basestring):
            # A non-URL string is assumed to be raw JSON text.
            try:
                json_dict = json.loads(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
        else:
            # Anything else is treated as an open file-like object.
            try:
                json_dict = json.load(data, object_pairs_hook=OrderedDict)
                return cls(json_dict)
            except ValueError:
                raise
    def write(self, output='jsonstat'):
        """Writes data from a Collection object to JSONstat or list of
        Pandas Dataframes.

        Args:
            output(string): can accept 'jsonstat' or 'dataframe_list'

        Returns:
            Serialized JSONstat or a list of Pandas Dataframes, depending
            on the 'output' parameter.

        Raises:
            ValueError: for any other value of 'output'.
        """
        if output == 'jsonstat':
            # (fix) use NumpyEncoder for consistency with Dataset.write()
            # and Dimension.write(), so numpy scalars serialize cleanly.
            return json.dumps(OrderedDict(self), cls=NumpyEncoder)
        elif output == 'dataframe_list':
            df_list = []
            unnest_collection(self, df_list)
            return df_list
        else:
            raise ValueError(
                "Allowed arguments are 'jsonstat' or 'dataframe_list'")
    def get(self, element):
        """Gets the ith element of the collection as an object of the
        corresponding class.

        NOTE(review): this overrides OrderedDict.get() with different
        semantics — confirm no caller relies on the dict behavior.

        Args:
            element(int): position of the element in the collection's
                item list.

        Returns:
            A Dataset, Collection or Dimension object read from the
            element's href.

        Raises:
            ValueError: if the element's class is not dataset, collection
                or dimension.
        """
        item = self['link']['item'][element]
        if item['class'] == 'dataset':
            return Dataset.read(item['href'])
        elif item['class'] == 'collection':
            return Collection.read(item['href'])
        elif item['class'] == 'dimension':
            return Dimension.read(item['href'])
        else:
            # (fix) removed a stray trailing quote from the message.
            raise ValueError(
                "Class not allowed. Please use dataset, collection or "
                "dimension")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.