repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
ThomasChiroux/attowiki | src/attowiki/git_tools.py | reset_to_last_commit | python | def reset_to_last_commit():
try:
repo = Repo()
gitcmd = repo.git
gitcmd.reset(hard=True)
except Exception:
pass | reset a modified file to his last commit status
This method does the same than a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing> | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/git_tools.py#L115-L133 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""git tools functions used in the project."""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
from datetime import datetime
import os
from git import Repo, InvalidGitRepositoryError
def _delta_dir():
"""returns the relative path of the current directory to the git
repository.
This path will be added the 'filename' path to find the file.
It current_dir is the git root, this function returns an empty string.
Keyword Arguments:
<none>
Returns:
str -- relative path of the current dir to git root dir
empty string if current dir is the git root dir
"""
repo = Repo()
current_dir = os.getcwd()
repo_dir = repo.tree().abspath
delta_dir = current_dir.replace(repo_dir, '')
if delta_dir:
return delta_dir + '/'
else:
return ''
def check_repo():
"""checks is local git repo is present or not
Keywords Arguments:
<none>
Returns:
boolean -- True if git repo is present, False if not
"""
try:
Repo()
except InvalidGitRepositoryError:
return False
return True
def commit(filename):
"""Commit (git) a specified file
This method does the same than a ::
$ git commit -a "message"
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
# gitcmd = repo.git
# gitcmd.commit(filename)
index = repo.index
index.commit("Updated file: {0}".format(filename))
except Exception as e:
print("exception while commit: %s" % e.message)
def add_file_to_repo(filename):
"""Add a file to the git repo
This method does the same than a ::
$ git add filename
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
index = repo.index
index.add([_delta_dir() + filename])
except Exception as e:
print("exception while gitadding file: %s" % e.message)
def commit_history(filename):
"""Retrieve the commit history for a given filename.
Keyword Arguments:
:filename: (str) -- full name of the file
Returns:
list of dicts -- list of commit
if the file is not found, returns an empty list
"""
result = []
repo = Repo()
for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):
result.append({'date':
datetime.fromtimestamp(commit.committed_date +
commit.committer_tz_offset),
'hexsha': commit.hexsha})
return result
def read_committed_file(gitref, filename):
"""Retrieve the content of a file in an old commit and returns it.
Ketword Arguments:
:gitref: (str) -- full reference of the git commit
:filename: (str) -- name (full path) of the file
Returns:
str -- content of the file
"""
repo = Repo()
commitobj = repo.commit(gitref)
blob = commitobj.tree[_delta_dir() + filename]
return blob.data_stream.read()
|
ThomasChiroux/attowiki | src/attowiki/git_tools.py | commit_history | python | def commit_history(filename):
result = []
repo = Repo()
for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):
result.append({'date':
datetime.fromtimestamp(commit.committed_date +
commit.committer_tz_offset),
'hexsha': commit.hexsha})
return result | Retrieve the commit history for a given filename.
Keyword Arguments:
:filename: (str) -- full name of the file
Returns:
list of dicts -- list of commit
if the file is not found, returns an empty list | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/git_tools.py#L136-L153 | [
"def _delta_dir():\n \"\"\"returns the relative path of the current directory to the git\n repository.\n This path will be added the 'filename' path to find the file.\n It current_dir is the git root, this function returns an empty string.\n\n Keyword Arguments:\n <none>\n\n Returns:\n str -- relative path of the current dir to git root dir\n empty string if current dir is the git root dir\n \"\"\"\n repo = Repo()\n current_dir = os.getcwd()\n repo_dir = repo.tree().abspath\n delta_dir = current_dir.replace(repo_dir, '')\n if delta_dir:\n return delta_dir + '/'\n else:\n return ''\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""git tools functions used in the project."""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
from datetime import datetime
import os
from git import Repo, InvalidGitRepositoryError
def _delta_dir():
"""returns the relative path of the current directory to the git
repository.
This path will be added the 'filename' path to find the file.
It current_dir is the git root, this function returns an empty string.
Keyword Arguments:
<none>
Returns:
str -- relative path of the current dir to git root dir
empty string if current dir is the git root dir
"""
repo = Repo()
current_dir = os.getcwd()
repo_dir = repo.tree().abspath
delta_dir = current_dir.replace(repo_dir, '')
if delta_dir:
return delta_dir + '/'
else:
return ''
def check_repo():
"""checks is local git repo is present or not
Keywords Arguments:
<none>
Returns:
boolean -- True if git repo is present, False if not
"""
try:
Repo()
except InvalidGitRepositoryError:
return False
return True
def commit(filename):
"""Commit (git) a specified file
This method does the same than a ::
$ git commit -a "message"
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
# gitcmd = repo.git
# gitcmd.commit(filename)
index = repo.index
index.commit("Updated file: {0}".format(filename))
except Exception as e:
print("exception while commit: %s" % e.message)
def add_file_to_repo(filename):
"""Add a file to the git repo
This method does the same than a ::
$ git add filename
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
index = repo.index
index.add([_delta_dir() + filename])
except Exception as e:
print("exception while gitadding file: %s" % e.message)
def reset_to_last_commit():
"""reset a modified file to his last commit status
This method does the same than a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing>
"""
try:
repo = Repo()
gitcmd = repo.git
gitcmd.reset(hard=True)
except Exception:
pass
def read_committed_file(gitref, filename):
"""Retrieve the content of a file in an old commit and returns it.
Ketword Arguments:
:gitref: (str) -- full reference of the git commit
:filename: (str) -- name (full path) of the file
Returns:
str -- content of the file
"""
repo = Repo()
commitobj = repo.commit(gitref)
blob = commitobj.tree[_delta_dir() + filename]
return blob.data_stream.read()
|
ThomasChiroux/attowiki | src/attowiki/git_tools.py | read_committed_file | python | def read_committed_file(gitref, filename):
repo = Repo()
commitobj = repo.commit(gitref)
blob = commitobj.tree[_delta_dir() + filename]
return blob.data_stream.read() | Retrieve the content of a file in an old commit and returns it.
Ketword Arguments:
:gitref: (str) -- full reference of the git commit
:filename: (str) -- name (full path) of the file
Returns:
str -- content of the file | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/git_tools.py#L156-L170 | [
"def _delta_dir():\n \"\"\"returns the relative path of the current directory to the git\n repository.\n This path will be added the 'filename' path to find the file.\n It current_dir is the git root, this function returns an empty string.\n\n Keyword Arguments:\n <none>\n\n Returns:\n str -- relative path of the current dir to git root dir\n empty string if current dir is the git root dir\n \"\"\"\n repo = Repo()\n current_dir = os.getcwd()\n repo_dir = repo.tree().abspath\n delta_dir = current_dir.replace(repo_dir, '')\n if delta_dir:\n return delta_dir + '/'\n else:\n return ''\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""git tools functions used in the project."""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
from datetime import datetime
import os
from git import Repo, InvalidGitRepositoryError
def _delta_dir():
"""returns the relative path of the current directory to the git
repository.
This path will be added the 'filename' path to find the file.
It current_dir is the git root, this function returns an empty string.
Keyword Arguments:
<none>
Returns:
str -- relative path of the current dir to git root dir
empty string if current dir is the git root dir
"""
repo = Repo()
current_dir = os.getcwd()
repo_dir = repo.tree().abspath
delta_dir = current_dir.replace(repo_dir, '')
if delta_dir:
return delta_dir + '/'
else:
return ''
def check_repo():
"""checks is local git repo is present or not
Keywords Arguments:
<none>
Returns:
boolean -- True if git repo is present, False if not
"""
try:
Repo()
except InvalidGitRepositoryError:
return False
return True
def commit(filename):
"""Commit (git) a specified file
This method does the same than a ::
$ git commit -a "message"
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
# gitcmd = repo.git
# gitcmd.commit(filename)
index = repo.index
index.commit("Updated file: {0}".format(filename))
except Exception as e:
print("exception while commit: %s" % e.message)
def add_file_to_repo(filename):
"""Add a file to the git repo
This method does the same than a ::
$ git add filename
Keyword Arguments:
:filename: (str) -- name of the file to commit
Returns:
<nothing>
"""
try:
repo = Repo()
index = repo.index
index.add([_delta_dir() + filename])
except Exception as e:
print("exception while gitadding file: %s" % e.message)
def reset_to_last_commit():
"""reset a modified file to his last commit status
This method does the same than a ::
$ git reset --hard
Keyword Arguments:
<none>
Returns:
<nothing>
"""
try:
repo = Repo()
gitcmd = repo.git
gitcmd.reset(hard=True)
except Exception:
pass
def commit_history(filename):
"""Retrieve the commit history for a given filename.
Keyword Arguments:
:filename: (str) -- full name of the file
Returns:
list of dicts -- list of commit
if the file is not found, returns an empty list
"""
result = []
repo = Repo()
for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):
result.append({'date':
datetime.fromtimestamp(commit.committed_date +
commit.committer_tz_offset),
'hexsha': commit.hexsha})
return result
|
ThomasChiroux/attowiki | src/attowiki/pdf.py | produce_pdf | python | def produce_pdf(rst_content=None, doctree_content=None, filename=None):
if filename is None:
filename = os.path.join(
"/tmp", ''.join([random.choice(string.ascii_letters +
string.digits) for n in range(15)]) + '.pdf')
r2p = RstToPdf(stylesheets=['pdf.style'],
style_path=[os.path.join(os.path.dirname(__file__),
'styles')],
breaklevel=0,
splittables=True,
footer="""###Title### - ###Page###/###Total###""")
r2p.createPdf(text=rst_content,
doctree=doctree_content,
output=filename)
return filename | produce a pdf content based of a given rst content
If filename is given, it will store the result using the given filename
if no filename is given, it will generate a pdf in /tmp/ with a random
name | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/pdf.py#L29-L49 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""use rst2pdf to produce a pdf file based on the given rst content
"""
import string
import random
import os
from rst2pdf.createpdf import RstToPdf
|
ThomasChiroux/attowiki | src/attowiki/tools.py | attowiki_distro_path | python | def attowiki_distro_path():
attowiki_path = os.path.abspath(__file__)
if attowiki_path[-1] != '/':
attowiki_path = attowiki_path[:attowiki_path.rfind('/')]
else:
attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]
return attowiki_path | return the absolute complete path where attowiki is located
.. todo:: use pkg_resources ? | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/tools.py#L30-L40 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""common tools for the project
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import os
|
ThomasChiroux/attowiki | src/attowiki/rst_directives.py | add_node | python | def add_node(node, **kwds):
nodes._add_node_class_names([node.__name__])
for key, val in kwds.iteritems():
try:
visit, depart = val
except ValueError:
raise ValueError('Value for key %r must be a '
'(visit, depart) function tuple' % key)
if key == 'html':
from docutils.writers.html4css1 import HTMLTranslator as translator
elif key == 'latex':
from docutils.writers.latex2e import LaTeXTranslator as translator
else:
# ignore invalid keys for compatibility
continue
setattr(translator, 'visit_'+node.__name__, visit)
if depart:
setattr(translator, 'depart_'+node.__name__, depart) | add_node from Sphinx | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/rst_directives.py#L30-L49 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""adds restructured text directives to docutils
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils import nodes
class todo(nodes.Admonition, nodes.Element):
"""todo node for docutils"""
pass
def visit_todo(self, node):
self.visit_admonition(node)
def depart_todo(self, node):
self.depart_admonition(node)
class Todo(BaseAdmonition):
"""todo directive for docutils
uses BaseAdmonition from docutils (like .. note:: of .. warning:: etc..)
"""
optional_arguments = 0
node_class = todo
class done(nodes.Admonition, nodes.Element):
"""done node for docutils"""
pass
def visit_done(self, node):
self.visit_admonition(node)
def depart_done(self, node):
self.depart_admonition(node)
class Done(BaseAdmonition):
"""done directive for docutils
uses BaseAdmonition from docutils (like .. note:: of .. warning:: etc..)
"""
optional_arguments = 0
node_class = done
|
ThomasChiroux/attowiki | src/attowiki/views.py | check_user | python | def check_user(user, password):
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None)) | check the auth for user and password. | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L55-L58 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_meta_index | python | def view_meta_index():
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo()) | List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__ | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L68-L82 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_meta_admonition | python | def view_meta_admonition(admonition_name, name=None):
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body']) | List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L86-L207 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_cancel_edit | python | def view_cancel_edit(name=None):
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404) | Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L211-L235 | [
"def reset_to_last_commit():\n \"\"\"reset a modified file to his last commit status\n\n This method does the same than a ::\n\n $ git reset --hard\n\n Keyword Arguments:\n <none>\n\n Returns:\n <nothing>\n \"\"\"\n try:\n repo = Repo()\n gitcmd = repo.git\n gitcmd.reset(hard=True)\n except Exception:\n pass\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
    """Serve the reST syntax cheat sheet as a plain-text page."""
    response.set_header('Content-Type', 'text/plain')
    body = template('rst_cheat_sheet')
    return body
@auth_basic(check_user)
def view_meta_index():
    """List all the available .rst files in the directory.

    view_meta_index is called by the 'meta' url: /__index__
    """
    # strip the leading "./" and the trailing ".rst"; newest name first
    page_names = [path[2:-4]
                  for path in sorted(glob.glob("./*.rst"), reverse=True)]
    return template('index',
                    type="view",
                    filelist=page_names,
                    name="__index__",
                    extended_name=None,
                    history=[],
                    gitref=None,
                    is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
    """List all found admonition from all the rst files found in directory.

    view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
    where XXXXXXX represents an admonition name, like:

    * todo
    * warning
    * danger
    * ...

    .. note:: this function may work for any docutils node, not only
       admonition

    Keyword Arguments:
        :admonition_name: (str) -- name of the admonition
        :name: (str) -- restrict the scan to this single page (OPTIONAL;
            by default every .rst file of the directory is scanned)

    Returns:
        bottle response object (rendered 'page' template) or 404 error page
    """
    # NOTE(review): leftover debug trace; writes to stdout on every call
    print("meta admo: %s - %s" % (admonition_name, name))
    # Resolve the admonition name to a docutils node class: the two
    # project-specific directives first, then any standard docutils node.
    admonition = None
    if admonition_name == 'todo':
        admonition = todo
    elif admonition_name == 'done':
        admonition = done
    elif hasattr(nodes, admonition_name):
        admonition = getattr(nodes, admonition_name)
    else:
        return abort(404)
    # Build an empty document (doc2) whose publisher will aggregate the
    # matching nodes collected from every scanned file.
    doc2_content = ""
    doc2_output, doc2_pub = docutils.core.publish_programmatically(
        source_class=io.StringInput,
        source=doc2_content,
        source_path=None,
        destination_class=io.StringOutput,
        destination=None, destination_path=None,
        reader=None, reader_name='standalone',
        parser=None, parser_name='restructuredtext',
        writer=AttowikiWriter(), writer_name=None,
        settings=None, settings_spec=None,
        settings_overrides=None,
        config_section=None,
        enable_exit_status=False)
    # Document header: one section plus an "XXX LIST" title.
    section1 = nodes.section("{0}_list_file".format(admonition_name))
    doc2_pub.reader.document.append(section1)
    title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
                         "{0} LIST".format(admonition_name.upper()))
    doc2_pub.reader.document.append(title1)
    # Choose which files to scan: all of them (newest first), or only
    # the single page `name`.rst.
    if name is None:
        rst_files = [filename[2:-4] for filename in sorted(
            glob.glob("./*.rst"))]
        rst_files.reverse()
    else:
        rst_files = [filename[2:-4] for filename in
                     sorted(glob.glob("./{0}.rst".format(name)))]
    for file in rst_files:
        # file_title tracks whether the per-file header (section + link
        # back to the page) has already been emitted for this file.
        file_title = False
        file_handle = open(file + '.rst', 'r')
        file_content = file_handle.read()
        file_handle.close()
        file_content = file_content.decode('utf-8')
        # Run a throwaway publish pass to obtain consistent settings for
        # the explicit Parser pass below.
        output, pub = docutils.core.publish_programmatically(
            source_class=io.StringInput, source=file_content,
            source_path=None,
            destination_class=io.StringOutput,
            destination=None, destination_path=None,
            reader=None, reader_name='standalone',
            parser=None, parser_name='restructuredtext',
            writer=None, writer_name='html',
            settings=None, settings_spec=None,
            settings_overrides=None,
            config_section=None,
            enable_exit_status=False)
        my_settings = pub.get_settings()
        parser = docutils.parsers.rst.Parser()
        document = docutils.utils.new_document('test', my_settings)
        parser.parse(file_content, document)
        # Pull every node of the requested type out of this file's tree
        # and graft it into the aggregate document.
        for node in document.traverse(admonition):
            if not file_title:
                file_title = True
                # new section
                section2 = nodes.section(file)
                doc2_pub.reader.document.append(section2)
                # add link to the originating file
                paragraph = nodes.paragraph()
                file_target = nodes.target(ids=[file],
                                           names=[file],
                                           refuri="/" + file)
                file_ref = nodes.reference(file, file,
                                           name=file,
                                           refuri="/" + file)
                paragraph.append(nodes.Text("in "))
                paragraph.append(file_ref)
                paragraph.append(file_target)
                paragraph.append(nodes.Text(":"))
                doc2_pub.reader.document.append(paragraph)
                # doc2_pub.reader.document.append(file_target)
            doc2_pub.reader.document.append(node)
    # Render the aggregate document to HTML parts.
    doc2_pub.apply_transforms()
    doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
    doc2_pub.writer.assemble_parts()
    if name is None:
        display_file_name = '__{0}__'.format(admonition_name)
        extended_name = None
    else:
        display_file_name = '{0}'.format(name)
        extended_name = '__{0}__'.format(admonition_name)
    return template('page',
                    type="view",
                    name=display_file_name,
                    extended_name=extended_name,
                    is_repo=check_repo(),
                    history=[],
                    gitref=None,
                    content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
@auth_basic(check_user)
def view_edit(name=None):
    """Edit or create a page.

    .. note:: this is a bottle view

    If no page name is given, an editor for a brand new (empty) page is
    rendered.

    Keyword Arguments:
        :name: (str) -- name of the page (OPTIONAL)

    Returns:
        bottle response object, or 404 if `name` has no matching .rst file
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # new page: start with an empty editor
        content = ""
    else:
        files = glob.glob("{0}.rst".format(name))
        if len(files) == 0:
            return abort(404)
        # with-block closes the handle (the original leaked it)
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    return template('edit',
                    type="edit",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=[],
                    gitref=None,
                    today=datetime.datetime.now().strftime("%Y%m%d"),
                    content=content)
@auth_basic(check_user)
def view_pdf(name=None):
    """Render a pdf file based on the given page.

    .. note:: this is a bottle view

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
            MANDATORY (without it the meta index is served instead)

    Returns:
        bottle response object (pdf download) or 404 error page
    """
    if name is None:
        return view_meta_index()
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        return abort(404)
    # read the page source; with-block closes the handle (was leaked)
    with open(files[0], 'r') as file_handle:
        doctree = publish_doctree(file_handle.read())
    dest_filename = name + '.pdf'
    # any failure in the pdf generation propagates to bottle
    # (the original wrapped this call in a no-op ``try/except: raise``)
    produce_pdf(doctree_content=doctree,
                filename=dest_filename)
    return static_file(dest_filename,
                       root='',
                       download=True)
@auth_basic(check_user)
def view_page(name=None):
    """Serve a page name.

    .. note:: this is a bottle view

    * if the view is called with the POST method, write the new page
      content to the file, commit the modification and then display the
      html rendering of the restructured text file
    * if the view is called with the GET method, directly display the html
      rendering of the restructured text file

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
            OPTIONAL

    if no filename is given, first try to find a "index.rst" file in the
    directory and serve it. If not found, serve the meta page __index__

    Returns:
        bottle response object
    """
    if request.method == 'POST':
        if name is None:
            # new file: the page name comes from the submitted form
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # with-block guarantees the file is closed before the commit,
            # even if the write raises (the original was not exception-safe)
            with open(filename, 'w') as file_handle:
                file_handle.write(request.forms.content.encode('utf-8'))
            add_file_to_repo(filename)
            commit(filename)
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # we try to find an index file (any capitalization)
        index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
        if len(index_files) == 0:
            # not found: redirect to the meta index
            return view_meta_index()
        name = index_files[0][2:-4]
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        # not a wiki page: fall back to serving a raw static file
        return static_file(name, '')
    # with-block closes the read handle (the original leaked it)
    with open(files[0], 'r') as file_handle:
        html_body = publish_parts(file_handle.read(),
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
    history = commit_history("{0}.rst".format(name))
    return template('page',
                    type="view",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=history,
                    gitref=None,
                    content=html_body)
@auth_basic(check_user)
def view_history(name, gitref):
    """Serve an old, committed revision of a page from the git repo.

    .. note:: this is a bottle view

    * GET only: a committed page can not be changed.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    rst_filename = name + '.rst'
    committed_source = read_committed_file(gitref, rst_filename)
    if not committed_source:
        return abort(404)
    rendered = publish_parts(committed_source,
                             writer=AttowikiWriter(),
                             settings=None,
                             settings_overrides=None)['html_body']
    return template('page',
                    type="history",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=commit_history(rst_filename),
                    gitref=gitref,
                    content=rendered)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
    """Serve the reST source of a page, current or committed.

    This function does not use any page template; it returns the raw
    source wrapped in the minimal 'source_view' template.

    .. note:: this is a bottle view

    * GET only: a committed page can not be changed.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into (OPTIONAL;
            without it the current working-copy source is served)

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if len(files) == 0:
            return abort(404)
        # with-block closes the handle (the original leaked it)
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    else:
        content = read_committed_file(gitref, name + '.rst')
    if not content:
        return abort(404)
    return template('source_view',
                    type="history",
                    name=name,
                    extended_name='__source__',
                    is_repo=check_repo(),
                    history=commit_history("{0}.rst".format(name)),
                    gitref=gitref,
                    content=content.decode('utf-8'))
@auth_basic(check_user)
def view_history_diff(name, gitref):
    """Serve the diff between the current source and an old commit.

    This function does not use any page template; it returns the computed
    diff lines via the minimal 'diff_view' template.

    .. note:: this is a bottle view

    * GET only: a committed page can not be changed.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    old_content = read_committed_file(gitref, name + '.rst')
    if not old_content:
        return abort(404)
    old_content = old_content.decode('utf-8')
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        return abort(404)
    # with-block closes the handle (the original leaked it)
    with open(files[0], 'r') as file_handle:
        current_content = file_handle.read().decode('utf-8')
    differ = difflib.Differ()
    result = list(differ.compare(old_content.splitlines(),
                                 current_content.splitlines()))
    return template('diff_view',
                    type="history",
                    name=name,
                    extended_name='__diff__',
                    is_repo=check_repo(),
                    history=commit_history("{0}.rst".format(name)),
                    gitref=gitref,
                    content=result)
@auth_basic(check_user)
def view_quick_save_page(name=None):
    """Quick save a page.

    .. note:: this is a bottle view

    * this view must be called with the PUT method

    Write the new page content to the file; do not commit or redirect.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)

    Returns:
        bottle response object (200 OK) or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if request.method == 'PUT':
        if name is None:
            # new file: the page name comes from the submitted form
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # read (and UTF-8-validate) the body *before* opening the
            # file for writing: the original truncated the page first,
            # so a failed body read destroyed the existing content
            content = request.body.read()
            content = content.decode('utf-8')
            # with-block guarantees the handle is closed even on error
            with open(filename, 'w') as file_handle:
                file_handle.write(content.encode('utf-8'))
            return "OK"
        else:
            return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_edit | python | def view_edit(name=None):
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404) | Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L239-L279 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
    """Cancel the edition of an existing page.

    Then render the last committed state of that page.

    .. note:: this is a bottle view

    if no page name is given, do nothing (it may leave some .tmp. files in
    the directory).

    Keyword Arguments:
        :name: (str) -- name of the page (OPTIONAL)

    Returns:
        bottle response object
    """
    if name is None:
        return redirect('/')
    matching = glob.glob("{0}.rst".format(name))
    if not matching:
        return abort(404)
    reset_to_last_commit()
    return redirect('/' + name)
@auth_basic(check_user)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_pdf | python | def view_pdf(name=None):
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404) | Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L283-L310 | [
"def produce_pdf(rst_content=None, doctree_content=None, filename=None):\n \"\"\"produce a pdf content based of a given rst content\n\n If filename is given, it will store the result using the given filename\n if no filename is given, it will generate a pdf in /tmp/ with a random\n name\n \"\"\"\n if filename is None:\n filename = os.path.join(\n \"/tmp\", ''.join([random.choice(string.ascii_letters +\n string.digits) for n in range(15)]) + '.pdf')\n r2p = RstToPdf(stylesheets=['pdf.style'],\n style_path=[os.path.join(os.path.dirname(__file__),\n 'styles')],\n breaklevel=0,\n splittables=True,\n footer=\"\"\"###Title### - ###Page###/###Total###\"\"\")\n r2p.createPdf(text=rst_content,\n doctree=doctree_content,\n output=filename)\n return filename\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
    """Abort an in-progress edit and restore the page's committed state.

    .. note:: this is a bottle view

    If no page name is given, just go back to the wiki root (this may
    leave some .tmp. files in the directory).

    Keyword Arguments:
        :name: (str) -- name of the page (OPTIONAL)

    Returns:
        bottle response object
    """
    if name is None:
        return redirect('/')
    matching = glob.glob("{0}.rst".format(name))
    if not matching:
        return abort(404)
    # throw away the uncommitted modifications (equivalent of git reset --hard)
    reset_to_last_commit()
    return redirect('/' + name)
@auth_basic(check_user)
def view_edit(name=None):
    """Edit an existing page, or create a new one.

    .. note:: this is a bottle view

    If no page name is given, the edit form is served empty so the user
    can create a new page.

    Keyword Arguments:
        :name: (str) -- name of the page (OPTIONAL)

    Returns:
        bottle response object (edit form) or 404 error page
    """
    # editing must never be served from cache
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # new page: empty edit form
        return template('edit',
                        type="edit",
                        name=name,
                        extended_name=None,
                        is_repo=check_repo(),
                        history=[],
                        gitref=None,
                        today=datetime.datetime.now().strftime("%Y%m%d"),
                        content="")
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        return abort(404)
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(files[0], 'r') as file_handle:
        content = file_handle.read()
    return template('edit',
                    type="edit",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=[],
                    gitref=None,
                    today=datetime.datetime.now().strftime("%Y%m%d"),
                    content=content)
@auth_basic(check_user)
def view_page(name=None):
    """Serve a page name.

    .. note:: this is a bottle view

    * if the view is called with the POST method, write the new page
      content to the file, commit the modification and then display the
      html rendering of the restructured text file
    * if the view is called with the GET method, directly display the html
      rendering of the restructured text file

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
                        OPTIONAL

    If no filename is given, first try to find a "index.rst" file in the
    directory and serve it. If not found, serve the meta page __index__.

    Returns:
        bottle response object
    """
    # NOTE: the decorator was applied twice in the original; once is enough,
    # the duplicate only made basic-auth run a second time for no benefit.
    if request.method == 'POST':
        if name is None:
            # new file: take the name from the submitted form
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # 'with' guarantees the handle is closed (the original leaked it)
            with open(filename, 'w') as file_handle:
                file_handle.write(request.forms.content.encode('utf-8'))
            add_file_to_repo(filename)
            commit(filename)
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # we try to find an index file (any capitalisation)
        index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
        if len(index_files) == 0:
            # not found: redirect to the meta page __index__
            return view_meta_index()
        else:
            # strip leading './' and trailing '.rst'
            name = index_files[0][2:-4]
    files = glob.glob("{0}.rst".format(name))
    if len(files) > 0:
        with open(files[0], 'r') as file_handle:
            source = file_handle.read()
        html_body = publish_parts(source,
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
        history = commit_history("{0}.rst".format(name))
        return template('page',
                        type="view",
                        name=name,
                        extended_name=None,
                        is_repo=check_repo(),
                        history=history,
                        gitref=None,
                        content=html_body)
    else:
        # not a wiki page: fall back to serving it as a static file
        return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
    """Render an old, committed revision of a page.

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    committed_source = read_committed_file(gitref, name + '.rst')
    if not committed_source:
        return abort(404)
    rendered = publish_parts(committed_source,
                             writer=AttowikiWriter(),
                             settings=None,
                             settings_overrides=None)['html_body']
    return template('page',
                    type="history",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=commit_history(name + '.rst'),
                    gitref=gitref,
                    content=rendered)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
    """Serve the reST source of a page, current or from an old commit.

    This function does not use any template: it returns only the source.

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into (OPTIONAL;
                          when None, the current working copy is served)

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if len(files) == 0:
            return abort(404)
        # 'with' guarantees the handle is closed (the original leaked it)
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    else:
        content = read_committed_file(gitref, name + '.rst')
    if content:
        return template('source_view',
                        type="history",
                        name=name,
                        extended_name='__source__',
                        is_repo=check_repo(),
                        history=commit_history("{0}.rst".format(name)),
                        gitref=gitref,
                        content=content.decode('utf-8'))
    else:
        return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
    """Show the diff between the current source and an old committed one.

    This function does not use any template: it returns only plain text.

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    old_content = read_committed_file(gitref, name + '.rst')
    if not old_content:
        return abort(404)
    old_content = old_content.decode('utf-8')
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        return abort(404)
    # 'with' guarantees the handle is closed (the original leaked it)
    with open(files[0], 'r') as file_handle:
        current_content = file_handle.read().decode('utf-8')
    differ = difflib.Differ()
    result = list(differ.compare(old_content.splitlines(),
                                 current_content.splitlines()))
    return template('diff_view',
                    type="history",
                    name=name,
                    extended_name='__diff__',
                    is_repo=check_repo(),
                    history=commit_history("{0}.rst".format(name)),
                    gitref=gitref,
                    content=result)
@auth_basic(check_user)
def view_quick_save_page(name=None):
    """Quick save a page.

    .. note:: this is a bottle view

    * this view must be called with the PUT method

    Write the new page content to the file; do NOT commit and do NOT
    redirect (this is the autosave endpoint).

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)

    Returns:
        bottle response object (200 OK) or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if request.method == 'PUT':
        if name is None:
            # new file: take the name from the submitted form
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            # read (and validate as UTF-8) the body BEFORE opening the file,
            # so a failed read can no longer leave a truncated page behind;
            # the decode/encode round-trip rejects invalid UTF-8 early
            content = request.body.read().decode('utf-8')
            # 'with' guarantees the handle is closed even if write raises
            with open("{0}.rst".format(name), 'w') as file_handle:
                file_handle.write(content.encode('utf-8'))
            return "OK"
        else:
            return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_page | python | def view_page(name=None):
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '') | Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L314-L378 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n",
"def commit(filename):\n \"\"\"Commit (git) a specified file\n\n This method does the same than a ::\n\n $ git commit -a \"message\"\n\n Keyword Arguments:\n :filename: (str) -- name of the file to commit\n\n Returns:\n <nothing>\n \"\"\"\n try:\n repo = Repo()\n # gitcmd = repo.git\n # gitcmd.commit(filename)\n index = repo.index\n index.commit(\"Updated file: {0}\".format(filename))\n except Exception as e:\n print(\"exception while commit: %s\" % e.message)\n",
"def add_file_to_repo(filename):\n \"\"\"Add a file to the git repo\n\n This method does the same than a ::\n\n $ git add filename\n\n Keyword Arguments:\n :filename: (str) -- name of the file to commit\n\n Returns:\n <nothing>\n \"\"\"\n try:\n repo = Repo()\n index = repo.index\n index.add([_delta_dir() + filename])\n except Exception as e:\n print(\"exception while gitadding file: %s\" % e.message)\n",
"def commit_history(filename):\n \"\"\"Retrieve the commit history for a given filename.\n\n Keyword Arguments:\n :filename: (str) -- full name of the file\n\n Returns:\n list of dicts -- list of commit\n if the file is not found, returns an empty list\n \"\"\"\n result = []\n repo = Repo()\n for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):\n result.append({'date':\n datetime.fromtimestamp(commit.committed_date +\n commit.committer_tz_offset),\n 'hexsha': commit.hexsha})\n return result\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_history | python | def view_history(name, gitref):
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404) | Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L382-L414 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n",
"def commit_history(filename):\n \"\"\"Retrieve the commit history for a given filename.\n\n Keyword Arguments:\n :filename: (str) -- full name of the file\n\n Returns:\n list of dicts -- list of commit\n if the file is not found, returns an empty list\n \"\"\"\n result = []\n repo = Repo()\n for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):\n result.append({'date':\n datetime.fromtimestamp(commit.committed_date +\n commit.committer_tz_offset),\n 'hexsha': commit.hexsha})\n return result\n",
"def read_committed_file(gitref, filename):\n \"\"\"Retrieve the content of a file in an old commit and returns it.\n\n Ketword Arguments:\n :gitref: (str) -- full reference of the git commit\n :filename: (str) -- name (full path) of the file\n\n Returns:\n str -- content of the file\n \"\"\"\n repo = Repo()\n commitobj = repo.commit(gitref)\n\n blob = commitobj.tree[_delta_dir() + filename]\n return blob.data_stream.read()\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
    """Edit an existing page or create a new one.

    .. note:: this is a bottle view

    If no page name is given, an empty editor is served so a new page
    can be created.

    Keyword Arguments:
        :name: (str) -- name of the page (OPTIONAL)

    Returns:
        bottle response object
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # New page: start the editor with an empty body.
        content = ""
    else:
        files = glob.glob("{0}.rst".format(name))
        if not files:
            return abort(404)
        # 'with' guarantees the file handle is closed (the previous
        # version leaked it).
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    return template('edit',
                    type="edit",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=[],
                    gitref=None,
                    today=datetime.datetime.now().strftime("%Y%m%d"),
                    content=content)
@auth_basic(check_user)
def view_pdf(name=None):
    """Render a pdf file based on the given page.

    .. note:: this is a bottle view

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension) MANDATORY

    Returns:
        bottle response object (the pdf as a download), the meta index
        when no name is given, or a 404 error page
    """
    if name is None:
        return view_meta_index()
    files = glob.glob("{0}.rst".format(name))
    if not files:
        return abort(404)
    # 'with' guarantees the file handle is closed (the previous
    # version leaked it).
    with open(files[0], 'r') as file_handle:
        doctree = publish_doctree(file_handle.read())
    dest_filename = name + '.pdf'
    # Let any rendering error propagate; the former
    # ``try/except: raise`` wrapper was a no-op.
    produce_pdf(doctree_content=doctree,
                filename=dest_filename)
    return static_file(dest_filename,
                       root='',
                       download=True)
@auth_basic(check_user)
def view_page(name=None):
    """Serve a page name.

    .. note:: this is a bottle view

    * if the view is called with the POST method, write the new page
      content to the file, commit the modification and then display the
      html rendering of the restructured text file
    * if the view is called with the GET method, directly display the
      html rendering of the restructured text file

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension) OPTIONAL

    If no filename is given, first try to find a "index.rst" file in
    the directory and serve it. If not found, serve the meta page
    __index__.

    Returns:
        bottle response object
    """
    if request.method == 'POST':
        if name is None and len(request.forms.filename) > 0:
            # New file: take the name from the submitted form.
            name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # 'with' guarantees the handle is closed before committing
            # (the previous version leaked it).
            with open(filename, 'w') as file_handle:
                file_handle.write(request.forms.content.encode('utf-8'))
            add_file_to_repo(filename)
            commit(filename)
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # Try to find an index file (any capitalization).
        index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
        if len(index_files) == 0:
            # Not found: fall back to the meta index page.
            return view_meta_index()
        name = index_files[0][2:-4]
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        # Not a wiki page: serve it as a plain static file.
        return static_file(name, '')
    with open(files[0], 'r') as file_handle:
        html_body = publish_parts(file_handle.read(),
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
    history = commit_history("{0}.rst".format(name))
    return template('page',
                    type="view",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=history,
                    gitref=None,
                    content=html_body)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
    """Serve the reST source of a page, current or from the git repo.

    .. note:: this is a bottle view

    * this is a GET only method: you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension)
        :gitref: (str) -- hexsha of the git commit to look into; if
            None, the current working-copy source is served

    Returns:
        bottle response object or 404 error page
    """
    # NOTE(review): the decorator was previously applied twice,
    # wrapping the auth check redundantly; a single application is
    # sufficient.
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if not files:
            return abort(404)
        # 'with' guarantees the handle is closed (previously leaked).
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    else:
        content = read_committed_file(gitref, name + '.rst')
    if not content:
        return abort(404)
    return template('source_view',
                    type="history",
                    name=name,
                    extended_name='__source__',
                    is_repo=check_repo(),
                    history=commit_history("{0}.rst".format(name)),
                    gitref=gitref,
                    content=content.decode('utf-8'))
@auth_basic(check_user)
def view_history_diff(name, gitref):
    """Serve the diff between a page and one of its old git versions.

    .. note:: this is a bottle view

    * this is a GET only method: you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    old_content = read_committed_file(gitref, name + '.rst')
    if not old_content:
        return abort(404)
    old_content = old_content.decode('utf-8')
    files = glob.glob("{0}.rst".format(name))
    if not files:
        return abort(404)
    # 'with' guarantees the handle is closed (previously leaked).
    with open(files[0], 'r') as file_handle:
        current_content = file_handle.read().decode('utf-8')
    differ = difflib.Differ()
    result = list(differ.compare(old_content.splitlines(),
                                 current_content.splitlines()))
    return template('diff_view',
                    type="history",
                    name=name,
                    extended_name='__diff__',
                    is_repo=check_repo(),
                    history=commit_history("{0}.rst".format(name)),
                    gitref=gitref,
                    content=result)
@auth_basic(check_user)
def view_quick_save_page(name=None):
    """Quick save a page.

    .. note:: this is a bottle view

    * this view must be called with the PUT method

    Write the new page content to the file; do not commit or redirect.

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst
            extension)

    Returns:
        bottle response object (200 OK) or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if request.method == 'PUT':
        if name is None and len(request.forms.filename) > 0:
            # New file: take the name from the submitted form.
            name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            content = request.body.read()
            # Round-trip through unicode validates the payload is
            # proper UTF-8 before it is written to disk.
            content = content.decode('utf-8')
            # 'with' guarantees the handle is closed even if the
            # write raises.
            with open(filename, 'w') as file_handle:
                file_handle.write(content.encode('utf-8'))
            return "OK"
    return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_history_source | python | def view_history_source(name, gitref=None):
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404) | Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L418-L458 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n",
"def commit_history(filename):\n \"\"\"Retrieve the commit history for a given filename.\n\n Keyword Arguments:\n :filename: (str) -- full name of the file\n\n Returns:\n list of dicts -- list of commit\n if the file is not found, returns an empty list\n \"\"\"\n result = []\n repo = Repo()\n for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):\n result.append({'date':\n datetime.fromtimestamp(commit.committed_date +\n commit.committer_tz_offset),\n 'hexsha': commit.hexsha})\n return result\n",
"def read_committed_file(gitref, filename):\n \"\"\"Retrieve the content of a file in an old commit and returns it.\n\n Keyword Arguments:\n :gitref: (str) -- full reference of the git commit\n :filename: (str) -- name (full path) of the file\n\n Returns:\n str -- content of the file\n \"\"\"\n repo = Repo()\n commitobj = repo.commit(gitref)\n\n blob = commitobj.tree[_delta_dir() + filename]\n return blob.data_stream.read()\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
"""Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK)
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_history_diff | python | def view_history_diff(name, gitref):
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404) | Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L462-L504 | [
"def check_repo():\n \"\"\"checks is local git repo is present or not\n\n Keywords Arguments:\n <none>\n\n Returns:\n boolean -- True if git repo is present, False if not\n \"\"\"\n try:\n Repo()\n except InvalidGitRepositoryError:\n return False\n return True\n",
"def commit_history(filename):\n \"\"\"Retrieve the commit history for a given filename.\n\n Keyword Arguments:\n :filename: (str) -- full name of the file\n\n Returns:\n list of dicts -- list of commit\n if the file is not found, returns an empty list\n \"\"\"\n result = []\n repo = Repo()\n for commit in repo.head.commit.iter_parents(paths=_delta_dir() + filename):\n result.append({'date':\n datetime.fromtimestamp(commit.committed_date +\n commit.committer_tz_offset),\n 'hexsha': commit.hexsha})\n return result\n",
"def read_committed_file(gitref, filename):\n \"\"\"Retrieve the content of a file in an old commit and returns it.\n\n Keyword Arguments:\n :gitref: (str) -- full reference of the git commit\n :filename: (str) -- name (full path) of the file\n\n Returns:\n str -- content of the file\n \"\"\"\n repo = Repo()\n commitobj = repo.commit(gitref)\n\n blob = commitobj.tree[_delta_dir() + filename]\n return blob.data_stream.read()\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
    """Serve a page name.

    .. note:: this is a bottle view

    * if the view is called with the POST method, write the new page
      content to the file, commit the modification and then display the
      html rendering of the restructured text file
    * if the view is called with the GET method, directly display the html
      rendering of the restructured text file

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
            OPTIONAL

    if no filename is given, first try to find a "index.rst" file in the
    directory and serve it. If not found, serve the meta page __index__

    Returns:
        bottle response object
    """
    if request.method == 'POST':
        if name is None and len(request.forms.filename) > 0:
            # new file: take the page name from the submitted form
            name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # 'with' guarantees the file is flushed and closed before
            # it is added to the repository and committed
            with open(filename, 'w') as file_handle:
                file_handle.write(request.forms.content.encode('utf-8'))
            add_file_to_repo(filename)
            commit(filename)
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # we try to find an index file (any capitalisation of "index")
        index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
        if len(index_files) == 0:
            # not found: redirect to __index__
            return view_meta_index()
        # strip the leading "./" and the trailing ".rst"
        name = index_files[0][2:-4]
    files = glob.glob("{0}.rst".format(name))
    if len(files) == 0:
        # not a wiki page: fall back to serving a static asset
        return static_file(name, '')
    # read + render the source; 'with' closes the handle
    # (the previous version leaked the file descriptor)
    with open(files[0], 'r') as file_handle:
        html_body = publish_parts(file_handle.read(),
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
    history = commit_history("{0}.rst".format(name))
    return template('page',
                    type="view",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=history,
                    gitref=None,
                    content=html_body)
@auth_basic(check_user)
def view_history(name, gitref):
    """Serve a page name from git repo (an old version of a page).

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    content = read_committed_file(gitref, name + '.rst')
    # nothing found at that ref: the page never existed there
    if not content:
        return abort(404)
    rendered_body = publish_parts(content,
                                  writer=AttowikiWriter(),
                                  settings=None,
                                  settings_overrides=None)['html_body']
    return template('page',
                    type="history",
                    name=name,
                    extended_name=None,
                    is_repo=check_repo(),
                    history=commit_history(name + '.rst'),
                    gitref=gitref,
                    content=rendered_body)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
    """Serve a page name from git repo (an old version of a page).

    then return the reST source code

    This function does not use any template it returns only plain text

    .. note:: this is a bottle view

    * this is a GET only method : you can not change a committed page

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)
        :gitref: (str) -- hexsha of the git commit to look into
            (when None, the current working-copy source is served)

    Returns:
        bottle response object or 404 error page
    """
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    response.set_header('Content-Type', 'text/html; charset=utf-8')
    if gitref is None:
        files = glob.glob("{0}.rst".format(name))
        if len(files) == 0:
            return abort(404)
        # read the working-copy source; 'with' closes the handle
        # (the previous version leaked the file descriptor)
        with open(files[0], 'r') as file_handle:
            content = file_handle.read()
    else:
        content = read_committed_file(gitref, name + '.rst')
    if content:
        return template('source_view',
                        type="history",
                        name=name,
                        extended_name='__source__',
                        is_repo=check_repo(),
                        history=commit_history("{0}.rst".format(name)),
                        gitref=gitref,
                        content=content.decode('utf-8'))
    else:
        return abort(404)
@auth_basic(check_user)
def view_quick_save_page(name=None):
    """Quick save a page.

    .. note:: this is a bottle view

    * this view must be called with the PUT method

    write the new page content to the file, and do not commit or redirect

    Keyword Arguments:
        :name: (str) -- name of the rest file (without the .rst extension)

    Returns:
        bottle response object (200 OK), or a 404 error when no page
        name can be determined
    """
    # note: the decorator was previously applied twice by mistake;
    # a single @auth_basic is enough to protect the view
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if request.method == 'PUT':
        if name is None and len(request.forms.filename) > 0:
            # new file: take the page name from the submitted form
            name = request.forms.filename
        if name is not None:
            filename = "{0}.rst".format(name)
            # decode/encode round-trip validates the body is proper UTF-8
            content = request.body.read().decode('utf-8')
            # 'with' guarantees the file is flushed and closed
            with open(filename, 'w') as file_handle:
                file_handle.write(content.encode('utf-8'))
            return "OK"
        else:
            return abort(404)
|
ThomasChiroux/attowiki | src/attowiki/views.py | view_quick_save_page | python | def view_quick_save_page(name=None):
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if request.method == 'PUT':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
content = request.body.read()
content = content.decode('utf-8')
file_handle.write(content.encode('utf-8'))
file_handle.close()
return "OK"
else:
return abort(404) | Quick save a page.
.. note:: this is a bottle view
* this view must be called with the PUT method
write the new page content to the file, and not not commit or redirect
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
Returns:
bottle response object (200 OK) | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/views.py#L508-L539 | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""bottle views for attowiki
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
import glob
import datetime
import difflib
import time # for profiling
# dependencies imports
from bottle import auth_basic
from bottle import request, response, template, abort, redirect, static_file
import docutils
from docutils.core import publish_parts, publish_doctree
# from docutils.writers.html4css1 import Writer as HisWriter
from writer import AttowikiWriter
from docutils import io, nodes
# project imports
import attowiki
from attowiki.rst_directives import todo, done
from attowiki.git_tools import (check_repo, commit,
reset_to_last_commit,
add_file_to_repo,
commit_history,
read_committed_file)
from attowiki.pdf import produce_pdf
def check_user(user, password):
"""check the auth for user and password."""
return ((user == attowiki.user or attowiki.user is None) and
(password == attowiki.password or attowiki.password is None))
def view_meta_cheat_sheet():
"""Display a cheat sheet of reST syntax."""
response.set_header('Content-Type', 'text/plain')
return template('rst_cheat_sheet')
@auth_basic(check_user)
def view_meta_index():
"""List all the available .rst files in the directory.
view_meta_index is called by the 'meta' url : /__index__
"""
rst_files = [filename[2:-4] for filename in sorted(glob.glob("./*.rst"))]
rst_files.reverse()
return template('index',
type="view",
filelist=rst_files,
name="__index__",
extended_name=None,
history=[],
gitref=None,
is_repo=check_repo())
@auth_basic(check_user)
def view_meta_admonition(admonition_name, name=None):
"""List all found admonition from all the rst files found in directory.
view_meta_admonition is called by the 'meta' url: /__XXXXXXX__
where XXXXXXX represents and admonition name, like:
* todo
* warning
* danger
* ...
.. note:: this function may works for any docutils node, not only
admonition
Keyword Arguments:
:admonition_name: (str) -- name of the admonition
"""
print("meta admo: %s - %s" % (admonition_name, name))
admonition = None
if admonition_name == 'todo':
admonition = todo
elif admonition_name == 'done':
admonition = done
elif hasattr(nodes, admonition_name):
admonition = getattr(nodes, admonition_name)
else:
return abort(404)
doc2_content = ""
doc2_output, doc2_pub = docutils.core.publish_programmatically(
source_class=io.StringInput,
source=doc2_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=AttowikiWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
section1 = nodes.section("{0}_list_file".format(admonition_name))
doc2_pub.reader.document.append(section1)
title1 = nodes.title("{0} LIST".format(admonition_name.upper()),
"{0} LIST".format(admonition_name.upper()))
doc2_pub.reader.document.append(title1)
if name is None:
rst_files = [filename[2:-4] for filename in sorted(
glob.glob("./*.rst"))]
rst_files.reverse()
else:
rst_files = [filename[2:-4] for filename in
sorted(glob.glob("./{0}.rst".format(name)))]
for file in rst_files:
file_title = False
file_handle = open(file + '.rst', 'r')
file_content = file_handle.read()
file_handle.close()
file_content = file_content.decode('utf-8')
output, pub = docutils.core.publish_programmatically(
source_class=io.StringInput, source=file_content,
source_path=None,
destination_class=io.StringOutput,
destination=None, destination_path=None,
reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext',
writer=None, writer_name='html',
settings=None, settings_spec=None,
settings_overrides=None,
config_section=None,
enable_exit_status=False)
my_settings = pub.get_settings()
parser = docutils.parsers.rst.Parser()
document = docutils.utils.new_document('test', my_settings)
parser.parse(file_content, document)
for node in document.traverse(admonition):
if not file_title:
file_title = True
# new section
section2 = nodes.section(file)
doc2_pub.reader.document.append(section2)
# add link to the originating file
paragraph = nodes.paragraph()
file_target = nodes.target(ids=[file],
names=[file],
refuri="/" + file)
file_ref = nodes.reference(file, file,
name=file,
refuri="/" + file)
paragraph.append(nodes.Text("in "))
paragraph.append(file_ref)
paragraph.append(file_target)
paragraph.append(nodes.Text(":"))
doc2_pub.reader.document.append(paragraph)
# doc2_pub.reader.document.append(file_target)
doc2_pub.reader.document.append(node)
doc2_pub.apply_transforms()
doc2_pub.writer.write(doc2_pub.document, doc2_pub.destination)
doc2_pub.writer.assemble_parts()
if name is None:
display_file_name = '__{0}__'.format(admonition_name)
extended_name = None
else:
display_file_name = '{0}'.format(name)
extended_name = '__{0}__'.format(admonition_name)
return template('page',
type="view",
name=display_file_name,
extended_name=extended_name,
is_repo=check_repo(),
history=[],
gitref=None,
content=doc2_pub.writer.parts['html_body'])
@auth_basic(check_user)
def view_cancel_edit(name=None):
"""Cancel the edition of an existing page.
Then render the last modification status
.. note:: this is a bottle view
if no page name is given, do nothing (it may leave some .tmp. files in
the directory).
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
if name is None:
return redirect('/')
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
reset_to_last_commit()
return redirect('/' + name)
else:
return abort(404)
@auth_basic(check_user)
def view_edit(name=None):
"""Edit or creates a new page.
.. note:: this is a bottle view
if no page name is given, creates a new page.
Keyword Arguments:
:name: (str) -- name of the page (OPTIONAL)
Returns:
bottle response object
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# new page
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content="")
else:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
return template('edit',
type="edit",
name=name,
extended_name=None,
is_repo=check_repo(),
history=[],
gitref=None,
today=datetime.datetime.now().strftime("%Y%m%d"),
content=file_handle.read())
else:
return abort(404)
@auth_basic(check_user)
def view_pdf(name=None):
"""Render a pdf file based on the given page.
.. note:: this is a bottle view
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
MANDATORY
"""
if name is None:
return view_meta_index()
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
dest_filename = name + '.pdf'
doctree = publish_doctree(file_handle.read())
try:
produce_pdf(doctree_content=doctree,
filename=dest_filename)
except:
raise
else:
return static_file(dest_filename,
root='',
download=True)
else:
return abort(404)
@auth_basic(check_user)
def view_page(name=None):
"""Serve a page name.
.. note:: this is a bottle view
* if the view is called with the POST method, write the new page
content to the file, commit the modification and then display the
html rendering of the restructured text file
* if the view is called with the GET method, directly display the html
rendering of the restructured text file
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
OPTIONAL
if no filename is given, first try to find a "index.rst" file in the
directory and serve it. If not found, serve the meta page __index__
Returns:
bottle response object
"""
if request.method == 'POST':
if name is None:
# new file
if len(request.forms.filename) > 0:
name = request.forms.filename
if name is not None:
filename = "{0}.rst".format(name)
file_handle = open(filename, 'w')
file_handle.write(request.forms.content.encode('utf-8'))
file_handle.close()
add_file_to_repo(filename)
commit(filename)
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
if name is None:
# we try to find an index file
index_files = glob.glob("./[Ii][Nn][Dd][Ee][Xx].rst")
if len(index_files) == 0:
# not found
# redirect to __index__
return view_meta_index()
else:
name = index_files[0][2:-4]
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
html_body = publish_parts(file_handle.read(),
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history("{0}.rst".format(name))
return template('page',
type="view",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=None,
content=html_body)
else:
return static_file(name, '')
@auth_basic(check_user)
def view_history(name, gitref):
"""Serve a page name from git repo (an old version of a page).
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
content = read_committed_file(gitref, name + '.rst')
if content:
html_body = publish_parts(content,
writer=AttowikiWriter(),
settings=None,
settings_overrides=None)['html_body']
history = commit_history(name + '.rst')
return template('page',
type="history",
name=name,
extended_name=None,
is_repo=check_repo(),
history=history,
gitref=gitref,
content=html_body)
else:
return abort(404)
@auth_basic(check_user)
def view_history_source(name, gitref=None):
"""Serve a page name from git repo (an old version of a page).
then return the reST source code
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
if gitref is None:
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
content = file_handle.read()
else:
return abort(404)
else:
content = read_committed_file(gitref, name + '.rst')
if content:
return template('source_view',
type="history",
name=name,
extended_name='__source__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=content.decode('utf-8'))
else:
return abort(404)
@auth_basic(check_user)
def view_history_diff(name, gitref):
"""Serve a page name from git repo (an old version of a page).
then return the diff between current source and the old commited source
This function does not use any template it returns only plain text
.. note:: this is a bottle view
* this is a GET only method : you can not change a committed page
Keyword Arguments:
:name: (str) -- name of the rest file (without the .rst extension)
:gitref: (str) -- hexsha of the git commit to look into
Returns:
bottle response object or 404 error page
"""
response.set_header('Cache-control', 'no-cache')
response.set_header('Pragma', 'no-cache')
response.set_header('Content-Type', 'text/html; charset=utf-8')
old_content = read_committed_file(gitref, name + '.rst')
if old_content:
old_content = old_content.decode('utf-8')
files = glob.glob("{0}.rst".format(name))
if len(files) > 0:
file_handle = open(files[0], 'r')
current_content = file_handle.read().decode('utf-8')
differ = difflib.Differ()
result = list(differ.compare(old_content.splitlines(),
current_content.splitlines()))
return template('diff_view',
type="history",
name=name,
extended_name='__diff__',
is_repo=check_repo(),
history=commit_history("{0}.rst".format(name)),
gitref=gitref,
content=result)
else:
return abort(404)
else:
return abort(404)
@auth_basic(check_user)
|
ThomasChiroux/attowiki | src/attowiki/main.py | main | python | def main():
# register specific rst directives
# small trick here: get_language will reveal languages.en
labels = languages.get_language('en').labels
# add the label
languages.en.labels["todo"] = "Todo"
# add node
add_node(todo,
html=(visit_todo, depart_todo),
latex=(visit_todo, depart_todo),
text=(visit_todo, depart_todo))
# nodes._add_node_class_names(['todo'])
# register the new directive todo
directives.register_directive('todo', Todo)
# add the label
languages.en.labels["done"] = "Done"
# add node
add_node(done,
html=(visit_done, depart_done),
latex=(visit_done, depart_done),
text=(visit_done, depart_done))
# nodes._add_node_class_names(['todo'])
# register the new directive todo
directives.register_directive('done', Done)
# Check if the directory is under git, if not, create the repo
try:
Repo()
except InvalidGitRepositoryError:
Repo.init()
# add view path from module localisation
views_path = attowiki_distro_path() + '/views/'
bottle.TEMPLATE_PATH.insert(0, views_path)
app = bottle.Bottle()
# All the Urls of the project
# index or __index__
app.route('/', method='GET')(views.view_page)
# new page
app.route('/', method='POST')(views.view_page)
# meta pages
app.route('/__index__')(views.view_meta_index)
app.route('/__cheatsheet__')(views.view_meta_cheat_sheet)
app.route('/__history__/<gitref>/<name>.__source__')(
views.view_history_source)
app.route('/__history__/<gitref>/<name>.__diff__')(views.view_history_diff)
app.route('/__history__/<gitref>/<name>')(views.view_history)
app.route('/__<admonition_name>__')(views.view_meta_admonition)
# export pdf
app.route('/pdf/<name>')(views.view_pdf)
# new page
app.route('/edit/')(views.view_edit)
# edit an existing page
app.route('/edit/<name>')(views.view_edit)
# cancel the edition of an existing page
app.route('/cancel-edit/')(views.view_cancel_edit)
app.route('/cancel-edit/<name>')(views.view_cancel_edit)
# meta page for one single document
app.route('/<name>.__source__')(views.view_history_source)
app.route('/<name>.__diff__')(views.view_history_diff)
app.route('/<name>.__<admonition_name>__')(views.view_meta_admonition)
# view an existing page
app.route('/<name>', method='GET')(views.view_page)
# write new content to an existing page
app.route('/<name>', method='POST')(views.view_page)
# write new content to an existing page (without commit - for quick save)
app.route('/<name>', method='PUT')(views.view_quick_save_page)
# for devt purpose: set bottle in debug mode
bottle.debug(True) # this line may be commented in production mode
# run locally by default
import argparse
cmd_parser = argparse.ArgumentParser(
description="usage: %prog package.module:app")
cmd_parser.add_argument('-u', '--user', help='user name for auth',
default=None)
cmd_parser.add_argument('-p', '--password', help='password for auth',
default=None)
cmd_parser.add_argument('host', help='host to bind',
default='localhost', nargs='?')
cmd_parser.add_argument('port', help='bind port',
default='8080', nargs='?')
args = cmd_parser.parse_args()
attowiki.user = args.user
attowiki.password = args.password
if ':' in args.host:
args.host, args.port = args.host.rsplit(':', 1)
bottle.run(app, host=args.host, port=args.port) | main entry point
launches the webserver locally | train | https://github.com/ThomasChiroux/attowiki/blob/6c93c420305490d324fdc95a7b40b2283a222183/src/attowiki/main.py#L42-L145 | [
"def add_node(node, **kwds):\n \"\"\"add_node from Sphinx\n \"\"\"\n nodes._add_node_class_names([node.__name__])\n for key, val in kwds.iteritems():\n try:\n visit, depart = val\n except ValueError:\n raise ValueError('Value for key %r must be a '\n '(visit, depart) function tuple' % key)\n if key == 'html':\n from docutils.writers.html4css1 import HTMLTranslator as translator\n elif key == 'latex':\n from docutils.writers.latex2e import LaTeXTranslator as translator\n else:\n # ignore invalid keys for compatibility\n continue\n setattr(translator, 'visit_'+node.__name__, visit)\n if depart:\n setattr(translator, 'depart_'+node.__name__, depart)\n",
"def attowiki_distro_path():\n \"\"\"return the absolute complete path where attowiki is located\n\n .. todo:: use pkg_resources ?\n \"\"\"\n attowiki_path = os.path.abspath(__file__)\n if attowiki_path[-1] != '/':\n attowiki_path = attowiki_path[:attowiki_path.rfind('/')]\n else:\n attowiki_path = attowiki_path[:attowiki_path[:-1].rfind('/')]\n return attowiki_path\n"
] | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Thomas Chiroux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program.
# If not, see <http://www.gnu.org/licenses/lgpl-3.0.html>
#
"""file for main entry point
"""
__authors__ = [
# alphabetical order by last name
'Thomas Chiroux', ]
# dependencies imports
import bottle
from git import Repo, InvalidGitRepositoryError
from docutils.parsers.rst import directives
from docutils import nodes, languages
# project imports
import attowiki
from attowiki import views
from attowiki.rst_directives import add_node
from attowiki.rst_directives import todo, visit_todo, depart_todo, Todo
from attowiki.rst_directives import done, visit_done, depart_done, Done
from attowiki.tools import attowiki_distro_path
|
robinagist/ezo | ezo/core/helpers.py | get_topic_sha3 | python | def get_topic_sha3(event_block):
'''
takes an event block and returns a signature for sha3 hashing
:param event_block:
:return:
'''
sig = ""
sig += event_block["name"]
if not event_block["inputs"]:
sig += "()"
return sig
sig += "("
for input in event_block["inputs"]:
sig += input["type"]
sig += ","
sig = sig[:-1]
sig += ")"
return sig | takes an event block and returns a signature for sha3 hashing
:param event_block:
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/helpers.py#L63-L83 | null | import json, xxhash
from hexbytes import HexBytes
class HexJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, HexBytes):
return obj.hex()
return super().default(obj)
# returns the url for the stage
def get_url(config, target):
cfg = config["target"][target]
return cfg["url"]
# returns the account address for the stage
def get_account(config, target):
cfg = config["target"][target]
return cfg["account"]
# returns the base directory for contacts
def get_contract_path(config, filename=None):
if filename:
return "{}/{}".format(config["contract-dir"], filename)
return config["contract-dir"]
# returns a full path to the handler directory
def get_handler_path(config, contract_name=None):
if contract_name:
return "{}/{}".format(config["handlers-dir"], contract_name)
return config["handlers-dir"]
def get_templates_path(config, template_name=None):
if template_name:
return "{}/{}".format(config["templates-dir"], template_name)
return config["templates-dir"]
# returns an xxhash of the passed string
def get_hash(str):
bs = bytes(str, 'utf-8')
return xxhash.xxh64(bs).hexdigest()
# returns the sha3 topic for the event method
'''
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"name": "rtemp",
"type": "uint256"
}
],
"name": "FilledRequest",
"type": "event"
}
'''
### text tools
def red(str):
return("{}{}{}".format('\033[31m', str, '\033[39m'))
def green(str):
return("{}{}{}".format('\033[32m', str, '\033[39m'))
def yellow(str):
return("{}{}{}".format('\033[33m', str, '\033[39m'))
def blue(str):
return("{}{}{}".format('\033[34m', str, '\033[39m'))
def magenta(str):
return("{}{}{}".format('\033[35m', str, '\033[39m'))
def cyan(str):
return("{}{}{}".format('\033[36m', str, '\033[39m'))
def white(str):
return("{}{}{}".format('\033[37m', str, '\033[39m'))
def reset(str):
return("{}{}".format('\033[0m', str))
def bright(str):
return("{}{}".format('\033[1m', str))
def normal(str):
return("{}{}".format('\033[1m', str))
|
robinagist/ezo | ezo/core/tm_utils.py | EzoABCI.info | python | def info(self, req) -> ResponseInfo:
r = ResponseInfo()
r.version = "1.0"
r.last_block_height = 0
r.last_block_app_hash = b''
return r | Since this will always respond with height=0, Tendermint
will resync this app from the begining | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/tm_utils.py#L34-L43 | null | class EzoABCI(BaseApplication):
def init_chain(self, req) -> ResponseInitChain:
"""Set initial state on first run"""
self.txCount = 0
self.last_block_height = 0
return ResponseInitChain()
def check_tx(self, tx) -> ResponseCheckTx:
"""
Validate the Tx before entry into the mempool
Checks the txs are submitted in order 1,2,3...
If not an order, a non-zero code is returned and the tx
will be dropped.
"""
value = decode_number(tx)
if not value == (self.txCount + 1):
# respond with non-zero code
return ResponseCheckTx(code=1)
return ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, tx) -> ResponseDeliverTx:
"""Simply increment the state"""
self.txCount += 1
return ResponseDeliverTx(code=CodeTypeOk)
def query(self, req) -> ResponseQuery:
"""Return the last tx count"""
v = encode_number(self.txCount)
return ResponseQuery(code=CodeTypeOk, value=v, height=self.last_block_height)
def commit(self) -> ResponseCommit:
"""Return the current encode state value to tendermint"""
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) |
robinagist/ezo | ezo/core/tm_utils.py | EzoABCI.check_tx | python | def check_tx(self, tx) -> ResponseCheckTx:
value = decode_number(tx)
if not value == (self.txCount + 1):
# respond with non-zero code
return ResponseCheckTx(code=1)
return ResponseCheckTx(code=CodeTypeOk) | Validate the Tx before entry into the mempool
Checks the txs are submitted in order 1,2,3...
If not an order, a non-zero code is returned and the tx
will be dropped. | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/tm_utils.py#L51-L62 | [
"def decode_number(raw):\n return int.from_bytes(raw, byteorder='big')\n"
] | class EzoABCI(BaseApplication):
def info(self, req) -> ResponseInfo:
"""
Since this will always respond with height=0, Tendermint
will resync this app from the begining
"""
r = ResponseInfo()
r.version = "1.0"
r.last_block_height = 0
r.last_block_app_hash = b''
return r
def init_chain(self, req) -> ResponseInitChain:
"""Set initial state on first run"""
self.txCount = 0
self.last_block_height = 0
return ResponseInitChain()
def deliver_tx(self, tx) -> ResponseDeliverTx:
"""Simply increment the state"""
self.txCount += 1
return ResponseDeliverTx(code=CodeTypeOk)
def query(self, req) -> ResponseQuery:
"""Return the last tx count"""
v = encode_number(self.txCount)
return ResponseQuery(code=CodeTypeOk, value=v, height=self.last_block_height)
def commit(self) -> ResponseCommit:
"""Return the current encode state value to tendermint"""
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) |
robinagist/ezo | ezo/core/tm_utils.py | EzoABCI.query | python | def query(self, req) -> ResponseQuery:
v = encode_number(self.txCount)
return ResponseQuery(code=CodeTypeOk, value=v, height=self.last_block_height) | Return the last tx count | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/tm_utils.py#L70-L73 | [
"def encode_number(value):\n return struct.pack('>I', value)\n"
] | class EzoABCI(BaseApplication):
def info(self, req) -> ResponseInfo:
"""
Since this will always respond with height=0, Tendermint
will resync this app from the begining
"""
r = ResponseInfo()
r.version = "1.0"
r.last_block_height = 0
r.last_block_app_hash = b''
return r
def init_chain(self, req) -> ResponseInitChain:
"""Set initial state on first run"""
self.txCount = 0
self.last_block_height = 0
return ResponseInitChain()
def check_tx(self, tx) -> ResponseCheckTx:
"""
Validate the Tx before entry into the mempool
Checks the txs are submitted in order 1,2,3...
If not an order, a non-zero code is returned and the tx
will be dropped.
"""
value = decode_number(tx)
if not value == (self.txCount + 1):
# respond with non-zero code
return ResponseCheckTx(code=1)
return ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, tx) -> ResponseDeliverTx:
"""Simply increment the state"""
self.txCount += 1
return ResponseDeliverTx(code=CodeTypeOk)
def commit(self) -> ResponseCommit:
"""Return the current encode state value to tendermint"""
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) |
robinagist/ezo | ezo/core/tm_utils.py | EzoABCI.commit | python | def commit(self) -> ResponseCommit:
hash = struct.pack('>Q', self.txCount)
return ResponseCommit(data=hash) | Return the current encode state value to tendermint | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/tm_utils.py#L75-L78 | null | class EzoABCI(BaseApplication):
def info(self, req) -> ResponseInfo:
"""
Since this will always respond with height=0, Tendermint
will resync this app from the begining
"""
r = ResponseInfo()
r.version = "1.0"
r.last_block_height = 0
r.last_block_app_hash = b''
return r
def init_chain(self, req) -> ResponseInitChain:
"""Set initial state on first run"""
self.txCount = 0
self.last_block_height = 0
return ResponseInitChain()
def check_tx(self, tx) -> ResponseCheckTx:
"""
Validate the Tx before entry into the mempool
Checks the txs are submitted in order 1,2,3...
If not an order, a non-zero code is returned and the tx
will be dropped.
"""
value = decode_number(tx)
if not value == (self.txCount + 1):
# respond with non-zero code
return ResponseCheckTx(code=1)
return ResponseCheckTx(code=CodeTypeOk)
def deliver_tx(self, tx) -> ResponseDeliverTx:
"""Simply increment the state"""
self.txCount += 1
return ResponseDeliverTx(code=CodeTypeOk)
def query(self, req) -> ResponseQuery:
"""Return the last tx count"""
v = encode_number(self.txCount)
return ResponseQuery(code=CodeTypeOk, value=v, height=self.last_block_height)
|
robinagist/ezo | ezo/core/lib.py | EZO.dial | python | def dial(self, target):
'''
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
'''
if not target:
return None, "target network must be specified with -t or --target"
url = get_url(self.config, target)
try:
if url.startswith('ws'):
self.w3 = Web3(WebsocketProvider(url))
elif url.startswith('http'):
self.w3 = Web3(HTTPProvider(url))
elif url.endswith('ipc'):
if url == 'ipc':
url = None
self.w3 = Web3(Web3.IPCProvider(url))
else:
return None, "Invalid Provider URL: {}".format(url)
except Exception as e:
return None, e
return self.w3, None | connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L41-L70 | [
"def get_url(config, target):\n cfg = config[\"target\"][target]\n return cfg[\"url\"]\n"
] | class EZO:
'''
Easy Oracle (ezo) base class
'''
_listeners = dict()
db = None
log = None
# prefix keys for leveldb
CONTRACT = "CONTRACT"
COMPILED = "COMPILED"
DEPLOYED = "DEPLOYED"
def __init__(self, config):
if not config:
return
self.config = config
# self.target = None
self.w3 = None
EZO.db = DB(config["project-name"], config["leveldb"] )
def start(self, contract_names, target):
'''
loads the contracts -- starts their event listeners
:param contract_names:
:return:
'''
if isinstance(contract_names, str):
contract_names = [contract_names]
if not isinstance(contract_names, list):
return None, "error: expecting a string, or a list of contract names"
contract_listeners = []
for name in contract_names:
c, err = Contract.get(name, self)
if err:
EZO.log.error(red("error loading contract {}".format(name)))
EZO.log.error(red(err))
continue
if not c:
EZO.log.warn(blue("contract {} not found".format(name)))
continue
address, err = Contract.get_address(name, c.hash, self.db, target=target)
if err:
EZO.log.error(red("error obtaining address for contract {}").format(name))
EZO.log.error(red(err))
continue
if not address:
EZO.log.error(red("no address for contract {}".format(name)))
continue
contract_listeners.append(c.listen(address, target))
if contract_listeners:
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.gather(*contract_listeners)
)
else:
return None, "unable to start contract listeners"
@staticmethod
def create_project(name, include_examples=True):
'''
creates the initial project skeleton and files
:param name: the project name
:return:
'''
# from the start directory, create a project directory with the project name
path = "{}/{}".format(os.getcwd(), name)
if os.path.exists(path):
return None, "path {} already exists".format(path)
print(bright("creating new ezo project '{}".format(name)))
# make project directory
os.mkdir(path)
print(bright("created project directory: '{}".format(path)))
# create an empty contracts directory
contracts_dir = "{}/{}".format(path, "contracts")
os.mkdir(contracts_dir)
print(bright("created contract directory: '{}".format(contracts_dir)))
if include_examples:
c = [(create_sample_contracts_1(), 'contract1.sol'), (create_sample_contracts_2(), 'contract2.sol')]
for s in c:
c, fn = s
file_path = "{}/{}".format(contracts_dir, fn)
try:
with open(file_path, "w+") as outfile:
outfile.write(c)
except Exception as e:
print(bright("problem creating sample file: '{}".format(path)))
return None, e
print(bright("created sample contract: '{}".format(fn)))
# create the handlers directory
handlers_dir = "{}/{}".format(path, "handlers")
os.mkdir(handlers_dir)
print(bright("created handlers directory: '{}".format(handlers_dir)))
# leveldb directory (created by level)
leveldb = "{}/{}".format(path, "ezodb")
# create the initial config.json file
cfg = create_blank_config_obj()
cfg["ezo"]["contract-dir"] = contracts_dir
cfg["ezo"]["handlers-dir"] = handlers_dir
cfg["ezo"]["project-name"] = name
cfg["ezo"]["leveldb"] = leveldb
print(bright("creating configuration: '{}".format(path)))
# write the file to the root project dir
config_file_path = "{}/{}".format(path, "ezo.conf")
try:
with open(config_file_path, "w+") as outfile:
json.dump(cfg, outfile, indent=2)
except Exception as e:
print(bright("problem creating configuration file: '{}".format(path)))
return None, e
return None, None
|
robinagist/ezo | ezo/core/lib.py | EZO.start | python | def start(self, contract_names, target):
'''
loads the contracts -- starts their event listeners
:param contract_names:
:return:
'''
if isinstance(contract_names, str):
contract_names = [contract_names]
if not isinstance(contract_names, list):
return None, "error: expecting a string, or a list of contract names"
contract_listeners = []
for name in contract_names:
c, err = Contract.get(name, self)
if err:
EZO.log.error(red("error loading contract {}".format(name)))
EZO.log.error(red(err))
continue
if not c:
EZO.log.warn(blue("contract {} not found".format(name)))
continue
address, err = Contract.get_address(name, c.hash, self.db, target=target)
if err:
EZO.log.error(red("error obtaining address for contract {}").format(name))
EZO.log.error(red(err))
continue
if not address:
EZO.log.error(red("no address for contract {}".format(name)))
continue
contract_listeners.append(c.listen(address, target))
if contract_listeners:
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.gather(*contract_listeners)
)
else:
return None, "unable to start contract listeners" | loads the contracts -- starts their event listeners
:param contract_names:
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L72-L115 | [
"def red(str):\n return(\"{}{}{}\".format('\\033[31m', str, '\\033[39m'))\n",
"def blue(str):\n return(\"{}{}{}\".format('\\033[34m', str, '\\033[39m'))\n",
"def get(name, ezo):\n '''\n get the latest compiled contract instance by contract name\n :param name:\n :param ezo:\n :return:\n '''\n\n key = DB.pkey([EZO.CONTRACT, name])\n cp, err = ezo.db.get(key)\n if err:\n return None, err\n\n if not cp:\n return None, None\n\n # create a new Contract\n c = Contract(cp[\"name\"], ezo)\n c.abi = cp[\"abi\"]\n c.bin = cp[\"bin\"]\n c.hash = cp[\"hash\"]\n c.source = cp[\"source\"]\n c.timestamp = cp[\"timestamp\"]\n c.te_map = cp['te-map']\n\n return c, None\n",
"def get_address(name, hash, db, target=None):\n '''\n fetches the contract address of deployment\n\n :param hash: the contract file hash\n :return: (string) address of the contract\n error, if any\n '''\n\n key = DB.pkey([EZO.DEPLOYED, name, target, hash])\n\n d, err = db.get(key)\n if err:\n return None, err\n if not d:\n return None, None\n return d['address'].lower(), None\n"
] | class EZO:
'''
Easy Oracle (ezo) base class
'''
_listeners = dict()
db = None
log = None
# prefix keys for leveldb
CONTRACT = "CONTRACT"
COMPILED = "COMPILED"
DEPLOYED = "DEPLOYED"
def __init__(self, config):
if not config:
return
self.config = config
# self.target = None
self.w3 = None
EZO.db = DB(config["project-name"], config["leveldb"] )
def dial(self, target):
'''
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
'''
if not target:
return None, "target network must be specified with -t or --target"
url = get_url(self.config, target)
try:
if url.startswith('ws'):
self.w3 = Web3(WebsocketProvider(url))
elif url.startswith('http'):
self.w3 = Web3(HTTPProvider(url))
elif url.endswith('ipc'):
if url == 'ipc':
url = None
self.w3 = Web3(Web3.IPCProvider(url))
else:
return None, "Invalid Provider URL: {}".format(url)
except Exception as e:
return None, e
return self.w3, None
@staticmethod
def create_project(name, include_examples=True):
'''
creates the initial project skeleton and files
:param name: the project name
:return:
'''
# from the start directory, create a project directory with the project name
path = "{}/{}".format(os.getcwd(), name)
if os.path.exists(path):
return None, "path {} already exists".format(path)
print(bright("creating new ezo project '{}".format(name)))
# make project directory
os.mkdir(path)
print(bright("created project directory: '{}".format(path)))
# create an empty contracts directory
contracts_dir = "{}/{}".format(path, "contracts")
os.mkdir(contracts_dir)
print(bright("created contract directory: '{}".format(contracts_dir)))
if include_examples:
c = [(create_sample_contracts_1(), 'contract1.sol'), (create_sample_contracts_2(), 'contract2.sol')]
for s in c:
c, fn = s
file_path = "{}/{}".format(contracts_dir, fn)
try:
with open(file_path, "w+") as outfile:
outfile.write(c)
except Exception as e:
print(bright("problem creating sample file: '{}".format(path)))
return None, e
print(bright("created sample contract: '{}".format(fn)))
# create the handlers directory
handlers_dir = "{}/{}".format(path, "handlers")
os.mkdir(handlers_dir)
print(bright("created handlers directory: '{}".format(handlers_dir)))
# leveldb directory (created by level)
leveldb = "{}/{}".format(path, "ezodb")
# create the initial config.json file
cfg = create_blank_config_obj()
cfg["ezo"]["contract-dir"] = contracts_dir
cfg["ezo"]["handlers-dir"] = handlers_dir
cfg["ezo"]["project-name"] = name
cfg["ezo"]["leveldb"] = leveldb
print(bright("creating configuration: '{}".format(path)))
# write the file to the root project dir
config_file_path = "{}/{}".format(path, "ezo.conf")
try:
with open(config_file_path, "w+") as outfile:
json.dump(cfg, outfile, indent=2)
except Exception as e:
print(bright("problem creating configuration file: '{}".format(path)))
return None, e
return None, None
|
robinagist/ezo | ezo/core/lib.py | EZO.create_project | python | def create_project(name, include_examples=True):
'''
creates the initial project skeleton and files
:param name: the project name
:return:
'''
# from the start directory, create a project directory with the project name
path = "{}/{}".format(os.getcwd(), name)
if os.path.exists(path):
return None, "path {} already exists".format(path)
print(bright("creating new ezo project '{}".format(name)))
# make project directory
os.mkdir(path)
print(bright("created project directory: '{}".format(path)))
# create an empty contracts directory
contracts_dir = "{}/{}".format(path, "contracts")
os.mkdir(contracts_dir)
print(bright("created contract directory: '{}".format(contracts_dir)))
if include_examples:
c = [(create_sample_contracts_1(), 'contract1.sol'), (create_sample_contracts_2(), 'contract2.sol')]
for s in c:
c, fn = s
file_path = "{}/{}".format(contracts_dir, fn)
try:
with open(file_path, "w+") as outfile:
outfile.write(c)
except Exception as e:
print(bright("problem creating sample file: '{}".format(path)))
return None, e
print(bright("created sample contract: '{}".format(fn)))
# create the handlers directory
handlers_dir = "{}/{}".format(path, "handlers")
os.mkdir(handlers_dir)
print(bright("created handlers directory: '{}".format(handlers_dir)))
# leveldb directory (created by level)
leveldb = "{}/{}".format(path, "ezodb")
# create the initial config.json file
cfg = create_blank_config_obj()
cfg["ezo"]["contract-dir"] = contracts_dir
cfg["ezo"]["handlers-dir"] = handlers_dir
cfg["ezo"]["project-name"] = name
cfg["ezo"]["leveldb"] = leveldb
print(bright("creating configuration: '{}".format(path)))
# write the file to the root project dir
config_file_path = "{}/{}".format(path, "ezo.conf")
try:
with open(config_file_path, "w+") as outfile:
json.dump(cfg, outfile, indent=2)
except Exception as e:
print(bright("problem creating configuration file: '{}".format(path)))
return None, e
return None, None | creates the initial project skeleton and files
:param name: the project name
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L119-L181 | [
"def bright(str):\n return(\"{}{}\".format('\\033[1m', str))\n",
"def create_blank_config_obj():\n\n ks, err = Source.generate(\"ezoconf.m\")\n if err:\n return None, err\n return ks, None\n",
"def create_sample_contracts_1():\n\n # WeatherOracle\n\n ks, err = Source.generate(\"sample_contract_01.m\")\n if err:\n return None, err\n return ks, None\n",
"def create_sample_contracts_2():\n\n #TimestampOracle\n\n ks, err = Source.generate(\"sample_contract_02.m\")\n if err:\n return None, err\n return ks, None\n"
] | class EZO:
'''
Easy Oracle (ezo) base class
'''
_listeners = dict()
db = None
log = None
# prefix keys for leveldb
CONTRACT = "CONTRACT"
COMPILED = "COMPILED"
DEPLOYED = "DEPLOYED"
def __init__(self, config):
if not config:
return
self.config = config
# self.target = None
self.w3 = None
EZO.db = DB(config["project-name"], config["leveldb"] )
def dial(self, target):
'''
connects to a node
:param url: string (optional) - resource in which to connect.
if not provided, will use default for the stage
:returns: provider, error
'''
if not target:
return None, "target network must be specified with -t or --target"
url = get_url(self.config, target)
try:
if url.startswith('ws'):
self.w3 = Web3(WebsocketProvider(url))
elif url.startswith('http'):
self.w3 = Web3(HTTPProvider(url))
elif url.endswith('ipc'):
if url == 'ipc':
url = None
self.w3 = Web3(Web3.IPCProvider(url))
else:
return None, "Invalid Provider URL: {}".format(url)
except Exception as e:
return None, e
return self.w3, None
def start(self, contract_names, target):
'''
loads the contracts -- starts their event listeners
:param contract_names:
:return:
'''
if isinstance(contract_names, str):
contract_names = [contract_names]
if not isinstance(contract_names, list):
return None, "error: expecting a string, or a list of contract names"
contract_listeners = []
for name in contract_names:
c, err = Contract.get(name, self)
if err:
EZO.log.error(red("error loading contract {}".format(name)))
EZO.log.error(red(err))
continue
if not c:
EZO.log.warn(blue("contract {} not found".format(name)))
continue
address, err = Contract.get_address(name, c.hash, self.db, target=target)
if err:
EZO.log.error(red("error obtaining address for contract {}").format(name))
EZO.log.error(red(err))
continue
if not address:
EZO.log.error(red("no address for contract {}".format(name)))
continue
contract_listeners.append(c.listen(address, target))
if contract_listeners:
loop = asyncio.get_event_loop()
loop.run_until_complete(
asyncio.gather(*contract_listeners)
)
else:
return None, "unable to start contract listeners"
@staticmethod
|
robinagist/ezo | ezo/core/lib.py | Contract.deploy | python | def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None | deploy this contract
:param target:
:param account: the account address to use
:return: address, err | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L227-L289 | [
"def get_account(config, target):\n cfg = config[\"target\"][target]\n return cfg[\"account\"]\n",
"def pkey(elems):\n key = \"\"\n for e in elems:\n key += e\n key += \":\"\n return bytes(key, 'utf-8')\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.listen | python | async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close() | starts event listener for the contract
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L291-L315 | [
"def blue(str):\n return(\"{}{}{}\".format('\\033[34m', str, '\\033[39m'))\n",
"def bright(str):\n return(\"{}{}\".format('\\033[1m', str))\n",
"def handler(rce, contract, target):\n\n ce = ContractEvent(rce, target)\n\n # find the mappped method for the topic\n if ce.event_topic in contract.te_map:\n handler_path = contract.te_map[ce.event_topic]\n s = importlib.util.spec_from_file_location(\"handlers\", handler_path)\n handler_module = importlib.util.module_from_spec(s)\n s.loader.exec_module(handler_module)\n handler_module.handler(ce, contract)\n\n else:\n EZO.log.warn(blue(\"topic {} not in map\".format(ce.event_topic)))\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
    """Create a Python handler scaffold for each event in the contract ABI.

    Handler files are written under the configured handlers directory and
    each event's topic hash is mapped to its handler path in ``self.te_map``.
    The updated map is persisted via :meth:`save`.

    :param overwrite: regenerate handler files that already exist
    :return: (None, list of errors encountered while writing handlers)
    """
    contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
    errors = list()

    for event in (entry for entry in self.abi if entry["type"] == "event"):
        # topic hash identifies this event in the chain logs
        topic = Web3.sha3(text=get_topic_sha3(event))

        # resolve (and create, if needed) the handler directory
        handler_dir = get_handler_path(self._ezo.config, contract_name)
        if not os.path.isdir(handler_dir):
            os.mkdir(handler_dir)

        event_name = inflection.underscore(event['name'])
        handler_file = "{}/{}_{}".format(handler_dir, event_name, "handler.py")

        # only write the scaffold when missing, unless overwrite is forced
        if not os.path.exists(handler_file) or overwrite:
            code, err = gen_event_handler_code(event_name)
            if err:
                print(red("gen error: {}".format(err)))
            try:
                with open(handler_file, "w+") as fh:
                    fh.write(code)
            except Exception as exc:
                print(red("gen error: {}".format(exc)))
                errors.append(exc)
                continue

        # remember which handler serves this topic
        self.te_map[topic] = handler_file

    _, err = self.save(overwrite=True)
    if err:
        return None, err
    return None, errors
def paramsForMethod(self, method, data):
    """Parse a literal parameter string into the argument list for *method*.

    :param method: contract method name (unused here; kept for interface
                   parity with callers that pass it)
    :param data: string containing a Python literal (e.g. ``"[1, 'abc']"``),
                 or a falsy value when the method takes no arguments
    :return: the evaluated parameters, or None when there are none
    :raises ValueError, SyntaxError: if *data* is a non-empty string that
                                     is not a valid Python literal
    """
    # no data at all means a zero-argument call; previously this crashed
    # inside ast.literal_eval on None or ""
    if not data:
        return None
    v = ast.literal_eval(data)
    # normalize falsy literals ('[]', '()', '0', ...) to "no parameters",
    # matching the `if not params` checks in send()/call()
    return v if v else None
@staticmethod
def send(ezo, name, method, data, target):
    """Execute a state-changing transaction on a deployed contract method.

    :param ezo: ezo instance
    :param name: contract name
    :param method: contract method to transact against
    :param data: literal string of parameters for the method
    :param target: target network the contract is deployed on
    :return: (transaction receipt, None) or (None, error)
    """
    contract, err = Contract.get(name, ezo)
    if err:
        return None, err

    deployed_at, err = Contract.get_address(name, contract.hash, ezo.db, target)
    if err:
        return None, err

    # package the call exactly as the event-handler response path expects
    payload = {
        "address": deployed_at,
        "function": method,
        "params": contract.paramsForMethod(method, data),
        "target": target,
    }

    resp, err = contract.response(payload)
    if err:
        return None, err
    return resp, None
@staticmethod
def call(ezo, name, method, data, target):
    """Invoke a read-only contract method; chain state is not modified.

    :param ezo: ezo instance
    :param name: contract name
    :param method: contract method to call
    :param data: literal string of parameters for the method
    :param target: target network the contract is deployed on
    :return: (call result, None) or (None, error)
    """
    contract, err = Contract.get(name, ezo)
    if err:
        return None, err

    deployed_at, err = Contract.get_address(name, contract.hash, ezo.db, target)
    if err:
        return None, err

    params = contract.paramsForMethod(method, data)
    checksum_addr = ezo.w3.toChecksumAddress(deployed_at)
    ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))

    # lazily bind the web3 contract object the first time it is needed
    if not contract.contract_obj:
        try:
            contract.contract_obj = ezo.w3.eth.contract(address=checksum_addr, abi=contract.abi)
        except Exception as exc:
            return None, exc

    bound_method = contract.contract_obj.functions[method]
    try:
        result = bound_method(*params).call() if params else bound_method().call()
    except Exception as exc:
        return None, "error executing call: {}".format(exc)

    return result, None
@staticmethod
def get(name, ezo):
    """Fetch the most recent compiled Contract stored under *name*.

    :param name: contract name
    :param ezo: ezo instance
    :return: (Contract, None) on success, (None, None) when no record
             exists, or (None, error)
    """
    record, err = ezo.db.get(DB.pkey([EZO.CONTRACT, name]))
    if err:
        return None, err
    if not record:
        return None, None

    # hydrate a Contract instance from the stored record
    contract = Contract(record["name"], ezo)
    contract.abi = record["abi"]
    contract.bin = record["bin"]
    contract.hash = record["hash"]
    contract.source = record["source"]
    contract.timestamp = record["timestamp"]
    contract.te_map = record['te-map']
    return contract, None
@staticmethod
def create_from_hash(hash, ezo):
    """Rebuild a Contract from the data store given its source-code hash.

    :param hash: (string) hash of the contract source code
    :param ezo: ezo instance
    :return: (Contract, None) or (None, error)
    """
    record, err = ezo.db.get("contracts", hash)
    if err:
        return None, err

    # hydrate a Contract instance from the stored record
    contract = Contract(record["name"], ezo)
    contract.abi = record["abi"]
    contract.bin = record["bin"]
    contract.hash = record["hash"]
    contract.source = record["source"]
    contract.timestamp = record["timestamp"]
    contract.te_map = record['te-map']
    return contract, None
@staticmethod
def load(filepath):
    """Read a contract source file from disk.

    :param filepath: (string) path to the contract source file
    :return: (source text, None) on success, (None, error) on failure
    """
    try:
        with open(filepath, "r") as handle:
            return handle.read(), None
    except Exception as exc:
        return None, exc
@staticmethod
def compile(source, ezo):
    """Compile Solidity source and wrap each compiled unit in a Contract.

    :param source: (string) contract source code
    :param ezo: ezo instance attached to each created Contract
    :return: (list of Contract, None) or (None, error)
    """
    try:
        interfaces = compile_source(source)
        contracts = []
        for contract_name in interfaces:
            contract = Contract(contract_name, ezo)
            contract.abi = interfaces[contract_name]['abi']
            contract.bin = interfaces[contract_name]['bin']
            contracts.append(contract)
    except Exception as exc:
        return None, exc
    return contracts, None
@staticmethod
def get_address(name, hash, db, target=None):
    """Look up the deployed address for a contract on a target network.

    :param name: contract name
    :param hash: the contract source hash
    :param db: data-store handle
    :param target: target network the contract was deployed to
    :return: (lowercased address, None), (None, None) when no deployment
             record exists, or (None, error)
    """
    record, err = db.get(DB.pkey([EZO.DEPLOYED, name, target, hash]))
    if err:
        return None, err
    if not record:
        return None, None
    return record['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.response | python | def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None | called by the event handler with the result data
:param response_data: result data
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L317-L370 | [
"def get_account(config, target):\n cfg = config[\"target\"][target]\n return cfg[\"account\"]\n"
] | class Contract:
def __init__(self, name, ezo):
    """Initialize an empty Contract shell bound to an ezo instance.

    :param name: contract name (as produced by the compiler)
    :param ezo: owning ezo instance (provides w3, db, config)
    """
    self.name = name
    self._ezo = ezo
    # creation time of this in-memory instance
    self.timestamp = datetime.utcnow()
    # populated by compile()/get(): source hash, ABI, bytecode, source text
    self.hash = None
    self.abi = None
    self.bin = None
    self.source = None
    # topic hash -> event handler file path
    self.te_map = dict()
    # lazily-bound web3 contract object (see response()/call())
    self.contract_obj = None
def deploy(self, target, overwrite=False):
    '''
    Deploy this compiled contract to the target network.

    Unlocks the configured account (using EZO_PASSWORD when set), sends
    the constructor transaction, waits for the receipt, and records the
    deployment (address, tx hash, gas used) in the data store.

    :param target: name of the target network (from config)
    :param overwrite: replace an existing deployment record for this
                      contract/target/hash combination
    :return: (contract address, None) on success, (None, error) on failure
    '''
    name = self.name.replace('<stdin>:', "")
    key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
    if not target:
        return None, "target network must be set with -t or --target"
    # password may legitimately be None (unencrypted/dev accounts)
    password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None

    # see if a deployment already exists for this contract on this target
    if not overwrite:
        res, err = self._ezo.db.get(key)
        if err:
            return None, "ERROR: Contract.deployment() {}".format(err)
        if res:
            return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)

    account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
    self._ezo.w3.eth.accounts[0] = account
    try:
        u_state = self._ezo.w3.personal.unlockAccount(account, password)
    except Exception as e:
        return None, "unable to unlock account for {} using password".format(account)

    try:
        ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
        # pad the gas estimate to avoid borderline out-of-gas failures
        gas_estimate = ct.constructor().estimateGas()
        h = {'from': account, 'gas': gas_estimate + 1000}
        tx_hash = ct.constructor().transact(h)
        tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
        address = tx_receipt['contractAddress']
    except Exception as e:
        return None, e
    # finally:
    #     self._ezo.w3.personal.lockAccount(account)

    # record of the deployment, keyed by contract/target/source-hash
    d = dict()
    d["contract-name"] = self.name
    d["hash"] = self.hash
    d["tx-hash"] = tx_hash
    d["address"] = address
    d["gas-used"] = tx_receipt["gasUsed"]
    d["target"] = target
    d["timestamp"] = datetime.utcnow()

    # save the deployment information
    try:
        _, err = self._ezo.db.save(key, d, overwrite=overwrite)
        if err:
            return None, err
    except Exception as e:
        return None, e
    return address, None
async def listen(self, address, target):
    '''
    Poll for new log entries at *address* and dispatch them to handlers.

    Runs indefinitely on the caller's event loop until cancelled or an
    exception escapes the filter polling.

    :param address: deployed contract address to watch
    :param target: target network name, passed through to the handlers
    :return: (None, error) on failure; otherwise does not return
    '''
    if not address:
        return None, "listening address not provided"
    EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))

    interval = self._ezo.config["poll-interval"]
    event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})

    # NOTE: the original created asyncio.new_event_loop() here and closed it
    # in a finally block, but never installed or ran it -- this coroutine
    # already executes on the caller's loop, so that was dead code; removed.
    try:
        while True:
            for event in event_filter.get_new_entries():
                if EZO.log:
                    EZO.log.debug(bright("event received: {}".format(event)))
                # hand each raw log entry off to the mapped event handler
                ContractEvent.handler(event, self, target)
            # yield to the running loop between polls
            await asyncio.sleep(interval)
    except Exception as e:
        return None, e
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.send | python | def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None | runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L455-L485 | [
"def get(name, ezo):\n '''\n get the latest compiled contract instance by contract name\n :param name:\n :param ezo:\n :return:\n '''\n\n key = DB.pkey([EZO.CONTRACT, name])\n cp, err = ezo.db.get(key)\n if err:\n return None, err\n\n if not cp:\n return None, None\n\n # create a new Contract\n c = Contract(cp[\"name\"], ezo)\n c.abi = cp[\"abi\"]\n c.bin = cp[\"bin\"]\n c.hash = cp[\"hash\"]\n c.source = cp[\"source\"]\n c.timestamp = cp[\"timestamp\"]\n c.te_map = cp['te-map']\n\n return c, None\n",
"def get_address(name, hash, db, target=None):\n '''\n fetches the contract address of deployment\n\n :param hash: the contract file hash\n :return: (string) address of the contract\n error, if any\n '''\n\n key = DB.pkey([EZO.DEPLOYED, name, target, hash])\n\n d, err = db.get(key)\n if err:\n return None, err\n if not d:\n return None, None\n return d['address'].lower(), None\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
    '''
    Execute a contract transaction described by an event handler's result.

    Expects a payload carrying the deployed address, method name,
    parameter list and target network; unlocks the configured account,
    sends the transaction and waits for its receipt.

    :param response_data: dict with "address", "function", "params"
                          and "target" keys
    :return: (transaction receipt, None) on success, (None, error)
    '''
    # validate the payload before touching web3
    if "address" not in response_data:
        return None, "address missing from response_data payload"
    if "function" not in response_data:
        return None, "method missing from response_data payload"
    if "params" not in response_data:
        return None, "params missing from response_data payload"
    if "target" not in response_data:
        return None, "target missing from response_data payload"

    address = self._ezo.w3.toChecksumAddress(response_data["address"])
    account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
    self._ezo.w3.eth.accounts[0] = account

    tx_dict = dict()
    tx_dict["account"] = account
    tx_dict["from"] = account

    # password may legitimately be None (unencrypted/dev accounts)
    password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
    u_state = self._ezo.w3.personal.unlockAccount(account, password)

    # lazily bind the web3 contract object the first time it is needed
    if not self.contract_obj:
        try:
            self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
        except Exception as e:
            return None, e

    method = response_data["function"]
    params = response_data["params"]
    contract_func = self.contract_obj.functions[method]
    try:
        # pad the gas estimate to avoid borderline out-of-gas failures
        if not params:
            tx_dict["gas"] = contract_func().estimateGas() + 1000
            tx_hash = contract_func().transact(tx_dict)
        else:
            tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
            tx_hash = contract_func(*params).transact(tx_dict)
        receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
    except Exception as e:
        return None, "error executing transaction: {}".format(e)
    # finally:
    #     self._ezo.w3.personal.lockAccount(account)
    return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.call | python | def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None | calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L488-L529 | [
"def get_account(config, target):\n cfg = config[\"target\"][target]\n return cfg[\"account\"]\n",
"def get(name, ezo):\n '''\n get the latest compiled contract instance by contract name\n :param name:\n :param ezo:\n :return:\n '''\n\n key = DB.pkey([EZO.CONTRACT, name])\n cp, err = ezo.db.get(key)\n if err:\n return None, err\n\n if not cp:\n return None, None\n\n # create a new Contract\n c = Contract(cp[\"name\"], ezo)\n c.abi = cp[\"abi\"]\n c.bin = cp[\"bin\"]\n c.hash = cp[\"hash\"]\n c.source = cp[\"source\"]\n c.timestamp = cp[\"timestamp\"]\n c.te_map = cp['te-map']\n\n return c, None\n",
"def get_address(name, hash, db, target=None):\n '''\n fetches the contract address of deployment\n\n :param hash: the contract file hash\n :return: (string) address of the contract\n error, if any\n '''\n\n key = DB.pkey([EZO.DEPLOYED, name, target, hash])\n\n d, err = db.get(key)\n if err:\n return None, err\n if not d:\n return None, None\n return d['address'].lower(), None\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
    def response(self, response_data):
        '''
        Called by the event handler with the result data: executes a
        state-changing transaction against this contract's `function`
        with `params`, from the account configured for `target`.

        :param response_data: dict with required keys "address",
            "function", "params" and "target"
        :return: (transaction receipt, None) on success,
                 (None, error message or exception) on failure
        '''
        # validate the payload up front so failures are explicit
        if "address" not in response_data:
            return None, "address missing from response_data payload"
        if "function" not in response_data:
            return None, "method missing from response_data payload"
        if "params" not in response_data:
            return None, "params missing from response_data payload"
        if "target" not in response_data:
            return None, "target missing from response_data payload"
        address = self._ezo.w3.toChecksumAddress(response_data["address"])
        account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
        self._ezo.w3.eth.accounts[0] = account
        tx_dict = dict()
        tx_dict["account"] = account
        tx_dict["from"] = account
        # account password is taken from the environment, if set
        password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
        u_state = self._ezo.w3.personal.unlockAccount(account, password)
        # lazily build and cache the web3 contract object
        if not self.contract_obj:
            try:
                self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
            except Exception as e:
                return None, e
        method = response_data["function"]
        params = response_data["params"]
        contract_func = self.contract_obj.functions[method]
        try:
            # estimate gas first, pad it, then send the transaction
            if not params:
                tx_dict["gas"] = contract_func().estimateGas() + 1000
                tx_hash = contract_func().transact(tx_dict)
            else:
                tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
                tx_hash = contract_func(*params).transact(tx_dict)
            receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
        except Exception as e:
            return None, "error executing transaction: {}".format(e)
        # finally:
        #     self._ezo.w3.personal.lockAccount(account)
        return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
    def generate_event_handlers(self, overwrite=False):
        '''
        Create a Python handler-scaffold file for each event declared in
        this contract's ABI, and map each event topic hash to its handler
        path in self.te_map. The updated contract is saved at the end.

        :param overwrite: regenerate handler files that already exist
        :return: (None, list of errors) — errors list is empty on success,
                 or (None, err) if the final save fails
        '''
        # get the contract name, events from the abi
        contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
        errors = list()
        events = [x for x in self.abi if x["type"] == "event"]
        for event in events:
            # get the topic sha3 — the log topic used to route events
            topic = Web3.sha3(text=get_topic_sha3(event))
            # build full path to new event handler
            hp = get_handler_path(self._ezo.config, contract_name)
            if not os.path.isdir(hp):
                os.mkdir(hp)
            event_name = inflection.underscore(event['name'])
            eh = "{}/{}_{}".format(hp, event_name, "handler.py")
            # check to see if it exists
            # if not, or if overwrite option is on
            if not os.path.exists(eh) or overwrite:
                # create event handler scaffold in python
                code, err = gen_event_handler_code(event_name)
                if err:
                    print(red("gen error: {}".format(err)))
                try:
                    with open(eh, "w+") as f:
                        f.write(code)
                except Exception as e:
                    # record the failure and skip mapping this event
                    print(red("gen error: {}".format(e)))
                    errors.append(e)
                    continue
            # map the topic to the handler
            self.te_map[topic] = eh
        _, err = self.save(overwrite=True)
        if err:
            return None, err
        return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
    @staticmethod
    def send(ezo, name, method, data, target):
        '''
        runs a transaction on a contract method

        :param ezo: ezo instance
        :param name: name of the Contract
        :param method: name of the contract method
        :param data: formatted data to send to the contract method
                     (a Python literal string; see paramsForMethod)
        :param target: target network whose deployment should be used
        :return: (transaction receipt, None) on success,
                 (None, error) on failure at any step
        '''
        # load the contract by name
        c, err = Contract.get(name, ezo)
        if err:
            return None, err
        # resolve the deployed address for this contract on the target
        address, err = Contract.get_address(name, c.hash, ezo.db, target)
        if err:
            return None, err
        # build the payload expected by Contract.response()
        d = dict()
        d["address"] = address
        d["function"] = method
        d["params"] = c.paramsForMethod(method, data)
        d["target"] = target
        resp, err = c.response(d)
        if err:
            return None, err
        return resp, None
@staticmethod
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.get | python | def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None | get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return: | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L533-L558 | [
"def pkey(elems):\n key = \"\"\n for e in elems:\n key += e\n key += \":\"\n return bytes(key, 'utf-8')\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
    def deploy(self, target, overwrite=False):
        '''
        deploy this contract to `target` and persist the deployment record

        :param target: target network name (required)
        :param overwrite: redeploy even if a deployment record already
                          exists for this contract/target/hash
        :return: (contract address, None) on success,
                 (None, error message or exception) on failure
        '''
        name = self.name.replace('<stdin>:', "")
        key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
        if not target:
            return None, "target network must be set with -t or --target"
        # account password is taken from the environment, if set
        password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
        # see if a deployment already exists for this contract on this target
        if not overwrite:
            res, err = self._ezo.db.get(key)
            if err:
                return None, "ERROR: Contract.deployment() {}".format(err)
            if res:
                return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
        account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
        self._ezo.w3.eth.accounts[0] = account
        try:
            u_state = self._ezo.w3.personal.unlockAccount(account, password)
        except Exception as e:
            return None, "unable to unlock account for {} using password".format(account)
        try:
            # build the contract, estimate gas (padded), send the deploy
            # transaction and wait for it to be mined
            ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
            gas_estimate = ct.constructor().estimateGas()
            h = {'from': account, 'gas': gas_estimate + 1000}
            tx_hash = ct.constructor().transact(h)
            tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
            address = tx_receipt['contractAddress']
        except Exception as e:
            return None, e
        # finally:
        #     self._ezo.w3.personal.lockAccount(account)
        # assemble the deployment record to store
        d = dict()
        d["contract-name"] = self.name
        d["hash"] = self.hash
        d["tx-hash"] = tx_hash
        d["address"] = address
        d["gas-used"] = tx_receipt["gasUsed"]
        d["target"] = target
        d["timestamp"] = datetime.utcnow()
        # save the deployment information
        try:
            _, err = self._ezo.db.save(key, d, overwrite=overwrite)
            if err:
                return None, err
        except Exception as e:
            return None, e
        return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.create_from_hash | python | def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None | given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L562-L583 | null | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.load | python | def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None | loads a contract file
:param filepath: (string) - contract filename
:return: source, err | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L586-L599 | null | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.compile | python | def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None | compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L602-L622 | null | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
@staticmethod
def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None
|
robinagist/ezo | ezo/core/lib.py | Contract.get_address | python | def get_address(name, hash, db, target=None):
'''
fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any
'''
key = DB.pkey([EZO.DEPLOYED, name, target, hash])
d, err = db.get(key)
if err:
return None, err
if not d:
return None, None
return d['address'].lower(), None | fetches the contract address of deployment
:param hash: the contract file hash
:return: (string) address of the contract
error, if any | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L625-L641 | [
"def pkey(elems):\n key = \"\"\n for e in elems:\n key += e\n key += \":\"\n return bytes(key, 'utf-8')\n"
] | class Contract:
def __init__(self, name, ezo):
self.name = name
self._ezo = ezo
self.timestamp = datetime.utcnow()
self.hash = None
self.abi = None
self.bin = None
self.source = None
self.te_map = dict()
self.contract_obj = None
def deploy(self, target, overwrite=False):
'''
deploy this contract
:param target:
:param account: the account address to use
:return: address, err
'''
name = self.name.replace('<stdin>:', "")
key = DB.pkey([EZO.DEPLOYED, name, target, self.hash])
if not target:
return None, "target network must be set with -t or --target"
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
# see if a deployment already exists for this contract on this target
if not overwrite:
res, err = self._ezo.db.get(key)
if err:
return None, "ERROR: Contract.deployment() {}".format(err)
if res:
return None, "deployment on {} already exists for contract {} use '--overwrite' to force".format(target, self.hash)
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, target))
self._ezo.w3.eth.accounts[0] = account
try:
u_state = self._ezo.w3.personal.unlockAccount(account, password)
except Exception as e:
return None, "unable to unlock account for {} using password".format(account)
try:
ct = self._ezo.w3.eth.contract(abi=self.abi, bytecode=self.bin)
gas_estimate = ct.constructor().estimateGas()
h = {'from': account, 'gas': gas_estimate + 1000}
tx_hash = ct.constructor().transact(h)
tx_receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt['contractAddress']
except Exception as e:
return None, e
# finally:
# self._ezo.w3.personal.lockAccount(account)
d = dict()
d["contract-name"] = self.name
d["hash"] = self.hash
d["tx-hash"] = tx_hash
d["address"] = address
d["gas-used"] = tx_receipt["gasUsed"]
d["target"] = target
d["timestamp"] = datetime.utcnow()
# save the deployment information
try:
_, err = self._ezo.db.save(key, d, overwrite=overwrite)
if err:
return None, err
except Exception as e:
return None, e
return address, None
async def listen(self, address, target):
'''
starts event listener for the contract
:return:
'''
if not address:
return None, "listening address not provided"
EZO.log.info(bright("hello ezo::listening to address: {}".format(blue(address))))
interval = self._ezo.config["poll-interval"]
event_filter = self._ezo.w3.eth.filter({"address": address, "toBlock": "latest"})
loop = asyncio.new_event_loop()
try:
while True:
for event in event_filter.get_new_entries():
if EZO.log:
EZO.log.debug(bright("event received: {}".format(event)))
ContractEvent.handler(event, self, target)
await asyncio.sleep(interval)
except Exception as e:
return None, e
finally:
loop.close()
def response(self, response_data):
'''
called by the event handler with the result data
:param response_data: result data
:return:
'''
if "address" not in response_data:
return None, "address missing from response_data payload"
if "function" not in response_data:
return None, "method missing from response_data payload"
if "params" not in response_data:
return None, "params missing from response_data payload"
if "target" not in response_data:
return None, "target missing from response_data payload"
address = self._ezo.w3.toChecksumAddress(response_data["address"])
account = self._ezo.w3.toChecksumAddress(get_account(self._ezo.config, response_data["target"]))
self._ezo.w3.eth.accounts[0] = account
tx_dict = dict()
tx_dict["account"] = account
tx_dict["from"] = account
password = os.environ['EZO_PASSWORD'] if 'EZO_PASSWORD' in os.environ else None
u_state = self._ezo.w3.personal.unlockAccount(account, password)
if not self.contract_obj:
try:
self.contract_obj = self._ezo.w3.eth.contract(address=address, abi=self.abi)
except Exception as e:
return None, e
method = response_data["function"]
params = response_data["params"]
contract_func = self.contract_obj.functions[method]
try:
if not params:
tx_dict["gas"] = contract_func().estimateGas() + 1000
tx_hash = contract_func().transact(tx_dict)
else:
tx_dict["gas"] = contract_func(*params).estimateGas() + 1000
tx_hash = contract_func(*params).transact(tx_dict)
receipt = self._ezo.w3.eth.waitForTransactionReceipt(tx_hash)
except Exception as e:
return None, "error executing transaction: {}".format(e)
# finally:
# self._ezo.w3.personal.lockAccount(account)
return receipt, None
def save(self, overwrite=False):
c = dict()
c["name"] = self.name
c["abi"] = self.abi
c["bin"] = self.bin
c["source"] = self.source
c["hash"] = get_hash(self.source)
c["timestamp"] = self.timestamp
c["te-map"] = self.te_map
# save to compiled contract
name = self.name.replace('<stdin>:',"")
key = DB.pkey([EZO.COMPILED, name, c["hash"]])
ks, err = self._ezo.db.save(key, c, overwrite=overwrite)
if err:
return None, err
# save to contract
key = DB.pkey([EZO.CONTRACT, name])
ks, err = self._ezo.db.save(key, c, overwrite=True)
if err:
return None, err
return ks, None
def generate_event_handlers(self, overwrite=False):
# get the contract name, events from the abi
contract_name = inflection.underscore(self.name.replace('<stdin>:', ''))
errors = list()
events = [x for x in self.abi if x["type"] == "event"]
for event in events:
# get the topic sha3
topic = Web3.sha3(text=get_topic_sha3(event))
# build full path to new event handler
hp = get_handler_path(self._ezo.config, contract_name)
if not os.path.isdir(hp):
os.mkdir(hp)
event_name = inflection.underscore(event['name'])
eh = "{}/{}_{}".format(hp, event_name, "handler.py")
# check to see if it exists
# if not, or if overwrite option is on
if not os.path.exists(eh) or overwrite:
# create event handler scaffold in python
code, err = gen_event_handler_code(event_name)
if err:
print(red("gen error: {}".format(err)))
try:
with open(eh, "w+") as f:
f.write(code)
except Exception as e:
print(red("gen error: {}".format(e)))
errors.append(e)
continue
# map the topic to the handler
self.te_map[topic] = eh
_, err = self.save(overwrite=True)
if err:
return None, err
return None, errors
def paramsForMethod(self, method, data):
'''
'''
v = ast.literal_eval(data)
if not v:
return None
return v
@staticmethod
def send(ezo, name, method, data, target):
'''
runs a transaction on a contract method
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
d = dict()
d["address"] = address
d["function"] = method
d["params"] = c.paramsForMethod(method, data)
d["target"] = target
resp, err = c.response(d)
if err:
return None, err
return resp, None
@staticmethod
def call(ezo, name, method, data, target):
'''
calls a method with data and returns a result without changing the chain state
:param ezo: ezo instance
:param name: name of the Contract
:param method: name of the contract method
:param data: formatted data to send to the contract method
:param target: the target network
:return:
'''
# load the contract by name
c, err = Contract.get(name, ezo)
if err:
return None, err
address, err = Contract.get_address(name, c.hash, ezo.db, target)
if err:
return None, err
params = c.paramsForMethod(method, data)
address = ezo.w3.toChecksumAddress(address)
ezo.w3.eth.defaultAccount = ezo.w3.toChecksumAddress(get_account(ezo.config, target))
if not c.contract_obj:
try:
c.contract_obj = ezo.w3.eth.contract(address=address, abi=c.abi)
except Exception as e:
return None, e
contract_func = c.contract_obj.functions[method]
try:
if not params:
result = contract_func().call()
else:
result = contract_func(*params).call()
except Exception as e:
return None, "error executing call: {}".format(e)
return result, None
@staticmethod
def get(name, ezo):
'''
get the latest compiled contract instance by contract name
:param name:
:param ezo:
:return:
'''
key = DB.pkey([EZO.CONTRACT, name])
cp, err = ezo.db.get(key)
if err:
return None, err
if not cp:
return None, None
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def create_from_hash(hash, ezo):
'''
given the hash of a contract, returns a contract from the data store
:param hash: (string) hash of the contract source code
:param ezo: ezo instance
:return: contract instance, error
'''
cp, err = ezo.db.get("contracts", hash)
if err:
return None, err
# create a new Contract
c = Contract(cp["name"], ezo)
c.abi = cp["abi"]
c.bin = cp["bin"]
c.hash = cp["hash"]
c.source = cp["source"]
c.timestamp = cp["timestamp"]
c.te_map = cp['te-map']
return c, None
@staticmethod
def load(filepath):
'''
loads a contract file
:param filepath: (string) - contract filename
:return: source, err
'''
try:
with open(filepath, "r") as fh:
source = fh.read()
except Exception as e:
return None, e
return source, None
@staticmethod
def compile(source, ezo):
'''
compiles the source code
:param source: (string) - contract source code
:param ezo: - ezo reference for Contract object creation
:return: (list) compiled source
'''
try:
compiled = compile_source(source)
compiled_list = []
for name in compiled:
c = Contract(name, ezo)
interface = compiled[name]
c.abi = interface['abi']
c.bin = interface['bin']
compiled_list.append(c)
except Exception as e:
return None, e
return compiled_list, None
@staticmethod
|
robinagist/ezo | ezo/core/lib.py | Catalog.put | python | def put(contract_name, abi):
'''
save the contract's ABI
:param contract_name: string - name of the contract
:param abi: the contract's abi JSON file
:return: None, None if saved okay
None, error is an error
'''
if not Catalog.path:
return None, "path to catalog must be set before saving to it"
if not contract_name:
return None, "contract name must be provided before saving"
if not abi:
return None, "contract ABI missing"
abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
try:
with open(abi_file, "w+") as file:
file.write(abi)
except Exception as e:
return None, "Catalog.put error: {}".format(e)
return None, None | save the contract's ABI
:param contract_name: string - name of the contract
:param abi: the contract's abi JSON file
:return: None, None if saved okay
None, error is an error | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L658-L682 | null | class Catalog:
'''
a filesystem catalog for ABIs
motivation: LevelDB is a single user DB. Which means when the test client is executed against a contract
while ezo is running as an oracle, it cannot get access to contract ABI information. it needs to make
a contract call without having to recompile the contract itself. When ezo compiles a contract, it will save
the ABI to this filesystem catalog, so that the test client can access them while ezo runs as an oracle
in another process.
'''
path = None
@staticmethod
@staticmethod
def get(contract_name):
'''
return the contract's ABI, marshaled into python dict
:param contract_name: string - name of the contract to load
:return: ABI, None - if successful
None, error - if error
'''
if not Catalog.path:
return None, "path to catalog must be set before searching it"
if not contract_name:
return None, "contract name missing"
abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
try:
with open(abi_file, "r") as file:
abi = file.read()
except Exception as e:
return None, "Catalog.get error: {}".format(e)
return abi, None
|
robinagist/ezo | ezo/core/lib.py | Catalog.get | python | def get(contract_name):
'''
return the contract's ABI, marshaled into python dict
:param contract_name: string - name of the contract to load
:return: ABI, None - if successful
None, error - if error
'''
if not Catalog.path:
return None, "path to catalog must be set before searching it"
if not contract_name:
return None, "contract name missing"
abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
try:
with open(abi_file, "r") as file:
abi = file.read()
except Exception as e:
return None, "Catalog.get error: {}".format(e)
return abi, None | return the contract's ABI, marshaled into python dict
:param contract_name: string - name of the contract to load
:return: ABI, None - if successful
None, error - if error | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L686-L706 | null | class Catalog:
'''
a filesystem catalog for ABIs
motivation: LevelDB is a single user DB. Which means when the test client is executed against a contract
while ezo is running as an oracle, it cannot get access to contract ABI information. it needs to make
a contract call without having to recompile the contract itself. When ezo compiles a contract, it will save
the ABI to this filesystem catalog, so that the test client can access them while ezo runs as an oracle
in another process.
'''
path = None
@staticmethod
def put(contract_name, abi):
'''
save the contract's ABI
:param contract_name: string - name of the contract
:param abi: the contract's abi JSON file
:return: None, None if saved okay
None, error is an error
'''
if not Catalog.path:
return None, "path to catalog must be set before saving to it"
if not contract_name:
return None, "contract name must be provided before saving"
if not abi:
return None, "contract ABI missing"
abi_file = "{}/{}.abi".format(Catalog.path, contract_name)
try:
with open(abi_file, "w+") as file:
file.write(abi)
except Exception as e:
return None, "Catalog.put error: {}".format(e)
return None, None
@staticmethod
|
robinagist/ezo | ezo/core/lib.py | DB.open | python | def open(self):
'''
attempts to open the database. if it gets a locked message, it will wait one second and try
again. if it is still locked, it will return an error
:return: None, None if successful
None, error if error
'''
cycle = 2
count = 0
while(True):
try:
DB.db = plyvel.DB(DB.dbpath, create_if_missing=True).prefixed_db(bytes(DB.project, 'utf-8'))
if DB.db:
break
except Exception as e:
# wait for other program to unlock the db
count+=1
time.sleep(1)
if count >= cycle:
return None, "DB error: {}".format(e)
return None, None | attempts to open the database. if it gets a locked message, it will wait one second and try
again. if it is still locked, it will return an error
:return: None, None if successful
None, error if error | train | https://github.com/robinagist/ezo/blob/fae896daa1c896c7c50f2c9cfe3f7f9cdb3fc986/ezo/core/lib.py#L731-L754 | null | class DB:
'''
data storage abstraction layer for LevelDB
the db is opened and closed on demand. this allows multiple applications to use the same
DB at the same time. a pseudo lock-wait mechanism is implemented in the open method).
a caching object is placed ahead of and behind get method reads and behind save method writes. This keeps
oracle mode from having to hit leveldb very often at all.
'''
db = None
project = None
dbpath = None
cache = dict()
def __init__(self, project, dbpath=None):
DB.dbpath = dbpath if dbpath else '~/ezodb/'
DB.project = project if project else 'ezo_project_default'
def save(self, key, value, overwrite=False, serialize=True):
if isinstance(key, str):
key = bytes(key, 'utf-8')
if not overwrite:
a, err = self.get(key)
if err:
return None, err
if a:
return None, "{} already exists ".format(key)
_, err = self.open()
if err:
return None, err
v = pickle.dumps(value) if serialize else value
try:
DB.db.put(key, v)
except Exception as e:
return None, e
finally:
self.close()
DB.cache[key] = value
return key, None
def delete(self, key):
pass
def get(self, key, deserialize=True):
if isinstance(key, str):
key = bytes(key, 'utf-8')
if key in DB.cache:
return DB.cache[key], None
_, err = self.open()
if err:
return None, "DB.get error: {}".format(err)
val = DB.db.get(key)
if not val:
self.close()
return None, None
try:
if deserialize:
obj = pickle.loads(val)
else:
obj = val
except Exception as e:
return None, e
finally:
self.close()
DB.cache[key] = obj
return obj, None
def find(self, keypart):
_, err = self.open()
if err:
return None, err
if isinstance(keypart, str):
keypart = bytes(keypart, 'utf-8')
elif not isinstance(keypart, bytes):
return None, "keypart must be a string or byte string"
res = list()
try:
it = DB.db.iterator(prefix=keypart)
it.seek_to_start()
for key, value in it:
res.append({key.decode('utf-8'): pickle.loads(value)})
except Exception as e:
return None, e
finally:
self.close()
return res, None
def close(self):
# DB.db.db.close()
DB.db = None
@staticmethod
def pkey(elems):
key = ""
for e in elems:
key += e
key += ":"
return bytes(key, 'utf-8')
|
CMUSTRUDEL/strudel.utils | stutils/sysutils.py | mkdir | python | def mkdir(*args):
path = ''
for chunk in args:
path = os.path.join(path, chunk)
if not os.path.isdir(path):
os.mkdir(path)
return path | Create a directory specified by a sequence of subdirectories
>>> mkdir("/tmp", "foo", "bar", "baz")
'/tmp/foo/bar/baz'
>>> os.path.isdir('/tmp/foo/bar/baz')
True | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/sysutils.py#L11-L24 | null |
"""
Operations with files
"""
import six
import os
import subprocess
def shell(cmd, *args, **kwargs):
    # type: (Union[str, unicode], *Union[str, unicode], **Any) -> Tuple[int, str]
    """ Execute shell command and return output

    Args:
        cmd (str): the command itself, i.e. part until the first space
        *args: positional arguments, i.e. other space-separated parts
        rel_path (bool): execute relative to the path (default: `False`)
        raise_on_status (bool): raise exception if command
            exited with non-zero status (default: `True`)
        stderr (file-like): file-like object to collect stderr output,
            None by default

    Returns:
        Tuple[int, str]: status, shell output
    """
    if kwargs.get('rel_path') and not cmd.startswith("/"):
        cmd = os.path.join(kwargs['rel_path'], cmd)
    status = 0
    try:
        output = subprocess.check_output(
            (cmd,) + args, stderr=kwargs.get('stderr'))
    except subprocess.CalledProcessError as e:
        if kwargs.get('raise_on_status', True):
            raise e
        output = e.output
        status = e.returncode
    except OSError as e:  # command not found
        if kwargs.get('raise_on_status', True):
            raise e
        if 'stderr' in kwargs:
            # BUG FIX: Python 3 exceptions have no `.message` attribute,
            # so the original `e.message` raised AttributeError here.
            kwargs['stderr'].write(str(e))
        return -1, ""

    # check_output returns bytes on Python 3; decode only in that case.
    # Equivalent to the previous `six.PY3` check without requiring six.
    if not isinstance(output, str):
        output = output.decode('utf8')
    return status, output
def raw_filesize(path):
    # type: (str) -> Optional[int]
    """Return the size of a file or directory in bytes via ``du -bs``.

    None is returned when the path does not exist or cannot be accessed.
    """
    with open('/dev/null', 'w') as devnull:
        status, output = shell(
            "du", "-bs", path, raise_on_status=False, stderr=devnull)
    if status:
        return None
    # `du` prints "<size>\t<path>\n"; keep only the size field
    size_field = output.split("\t", 1)[0]
    return int(size_field)
|
CMUSTRUDEL/strudel.utils | stutils/sysutils.py | shell | python | def shell(cmd, *args, **kwargs):
# type: (Union[str, unicode], *Union[str, unicode], **Any) ->Tuple[int, str]
if kwargs.get('rel_path') and not cmd.startswith("/"):
cmd = os.path.join(kwargs['rel_path'], cmd)
status = 0
try:
output = subprocess.check_output(
(cmd,) + args, stderr=kwargs.get('stderr'))
except subprocess.CalledProcessError as e:
if kwargs.get('raise_on_status', True):
raise e
output = e.output
status = e.returncode
except OSError as e: # command not found
if kwargs.get('raise_on_status', True):
raise e
if 'stderr' in kwargs:
kwargs['stderr'].write(e.message)
return -1, ""
if six.PY3:
output = output.decode('utf8')
return status, output | Execute shell command and return output
Args:
cmd (str): the command itself, i.e. part until the first space
*args: positional arguments, i.e. other space-separated parts
rel_path (bool): execute relative to the path (default: `False`)
raise_on_status(bool): bool, raise exception if command
exited with non-zero status (default: `True`)
stderr (file-like): file-like object to collect stderr output,
None by default
Returns:
Tuple[int, str]: status, shell output | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/sysutils.py#L27-L64 | null |
"""
Operations with files
"""
import six
import os
import subprocess
def mkdir(*args):
    """Create a directory specified by a sequence of subdirectories.

    Every prefix of the joined path is created when missing, and the
    full joined path is returned.

    >>> mkdir("/tmp", "foo", "bar", "baz")
    '/tmp/foo/bar/baz'
    >>> os.path.isdir('/tmp/foo/bar/baz')
    True
    """
    current = ''
    for part in args:
        current = os.path.join(current, part)
        if not os.path.isdir(current):
            os.mkdir(current)
    return current
def raw_filesize(path):
    # type: (str) -> Optional[int]
    """Return the size of a file or directory in bytes via ``du -bs``.

    None is returned when the path does not exist or cannot be accessed.
    """
    with open('/dev/null', 'w') as devnull:
        status, output = shell(
            "du", "-bs", path, raise_on_status=False, stderr=devnull)
    if status:
        return None
    # `du` prints "<size>\t<path>\n"; keep only the size field
    size_field = output.split("\t", 1)[0]
    return int(size_field)
|
CMUSTRUDEL/strudel.utils | stutils/sysutils.py | raw_filesize | python | def raw_filesize(path):
# type: (str) -> Optional[int]
with open('/dev/null', 'w') as devnull:
status, output = shell("du", "-bs", path, raise_on_status=False,
stderr=devnull)
if status != 0:
return None
# output is: <size>\t<path>\n
return int(output.split("\t", 1)[0]) | Get size of a file/directory in bytes.
Will return None if path does not exist or cannot be accessed. | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/sysutils.py#L67-L79 | [
"def shell(cmd, *args, **kwargs):\n # type: (Union[str, unicode], *Union[str, unicode], **Any) ->Tuple[int, str]\n \"\"\" Execute shell command and return output\n\n Args:\n cmd (str): the command itself, i.e. part until the first space\n *args: positional arguments, i.e. other space-separated parts\n rel_path (bool): execute relative to the path (default: `False`)\n raise_on_status(bool): bool, raise exception if command\n exited with non-zero status (default: `True`)\n stderr (file-like): file-like object to collect stderr output,\n None by default\n\n Returns:\n Tuple[int, str]: status, shell output\n\n \"\"\"\n if kwargs.get('rel_path') and not cmd.startswith(\"/\"):\n cmd = os.path.join(kwargs['rel_path'], cmd)\n status = 0\n try:\n output = subprocess.check_output(\n (cmd,) + args, stderr=kwargs.get('stderr'))\n except subprocess.CalledProcessError as e:\n if kwargs.get('raise_on_status', True):\n raise e\n output = e.output\n status = e.returncode\n except OSError as e: # command not found\n if kwargs.get('raise_on_status', True):\n raise e\n if 'stderr' in kwargs:\n kwargs['stderr'].write(e.message)\n return -1, \"\"\n\n if six.PY3:\n output = output.decode('utf8')\n return status, output\n"
] |
"""
Operations with files
"""
import six
import os
import subprocess
def mkdir(*args):
    """Create a directory specified by a sequence of subdirectories.

    Every prefix of the joined path is created when missing, and the
    full joined path is returned.

    >>> mkdir("/tmp", "foo", "bar", "baz")
    '/tmp/foo/bar/baz'
    >>> os.path.isdir('/tmp/foo/bar/baz')
    True
    """
    current = ''
    for part in args:
        current = os.path.join(current, part)
        if not os.path.isdir(current):
            os.mkdir(current)
    return current
def shell(cmd, *args, **kwargs):
    # type: (Union[str, unicode], *Union[str, unicode], **Any) -> Tuple[int, str]
    """ Execute shell command and return output

    Args:
        cmd (str): the command itself, i.e. part until the first space
        *args: positional arguments, i.e. other space-separated parts
        rel_path (bool): execute relative to the path (default: `False`)
        raise_on_status (bool): raise exception if command
            exited with non-zero status (default: `True`)
        stderr (file-like): file-like object to collect stderr output,
            None by default

    Returns:
        Tuple[int, str]: status, shell output
    """
    if kwargs.get('rel_path') and not cmd.startswith("/"):
        cmd = os.path.join(kwargs['rel_path'], cmd)
    status = 0
    try:
        output = subprocess.check_output(
            (cmd,) + args, stderr=kwargs.get('stderr'))
    except subprocess.CalledProcessError as e:
        if kwargs.get('raise_on_status', True):
            raise e
        output = e.output
        status = e.returncode
    except OSError as e:  # command not found
        if kwargs.get('raise_on_status', True):
            raise e
        if 'stderr' in kwargs:
            # BUG FIX: Python 3 exceptions have no `.message` attribute,
            # so the original `e.message` raised AttributeError here.
            kwargs['stderr'].write(str(e))
        return -1, ""

    # check_output returns bytes on Python 3; decode only in that case.
    # Equivalent to the previous `six.PY3` check without requiring six.
    if not isinstance(output, str):
        output = output.decode('utf8')
    return status, output
|
CMUSTRUDEL/strudel.utils | stutils/versions.py | parse | python | def parse(version):
# type: (Union[str, unicode]) -> list
chunks = []
for chunk in re.findall(r"(\d+|[A-Za-z]\w*)", version):
try:
chunk = int(chunk)
except ValueError:
pass
chunks.append(chunk)
return chunks | Transform version string into comparable list
:param version: version string, e.g. 0.11.23rc1
:return: list of version chunks, e.g. [0, 11, 23, 'rc1']
>>> parse("1")
[1]
>>> parse("0.0.1")
[0, 0, 1]
>>> parse("0.11.23rc1")
[0, 11, 23, 'rc1'] | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/versions.py#L22-L42 | null |
import re
def is_alpha(version):
    # type: (Union[str, unicode]) -> bool
    """Tell whether *version* is a pre-release (not a stable release).

    A stable release consists of digits and dots only; anything else
    (letters, suffixes such as "rc1") counts as a pre-release.

    >>> is_alpha("1.0.0")
    False
    >>> is_alpha("1.0rc1")
    True
    >>> is_alpha("0.0.0.0")
    False
    """
    stable = re.match(r"^\d+(\.\d+)*$", version.strip())
    return stable is None
def compare(ver1, ver2):
    # type: (Union[str, unicode], Union[str, unicode]) -> int
    """cmp()-style comparison of two version strings.

    Returns -1 when ver1 < ver2, 0 when equal, 1 when ver1 > ver2.
    A trailing alphabetic chunk (e.g. "rc1") sorts *before* the same
    version without it: "0.1.1rc1" < "0.1.1".

    >>> compare("0.1.1", "0.1.2")
    -1
    >>> compare("0.1", "0.1.1")
    0
    >>> compare("0.1.1rc1", "0.1.1")
    -1
    """
    left = parse(str(ver1))
    right = parse(str(ver2))
    common = min(len(left), len(right))
    for a, b in zip(left, right):
        if a != b:
            return 1 if a > b else -1
    # equal so far; an extra *string* chunk marks a pre-release suffix,
    # which sorts below the shorter, suffix-free version
    if len(left) > common and isinstance(left[common], str):
        return -1
    if len(right) > common and isinstance(right[common], str):
        return 1
    return 0
|
CMUSTRUDEL/strudel.utils | stutils/versions.py | compare | python | def compare(ver1, ver2):
# type: (Union[str, unicode], Union[str, unicode]) -> int
chunks1 = parse(str(ver1))
chunks2 = parse(str(ver2))
min_len = min(len(chunks1), len(chunks2))
for i in range(min_len):
if chunks1[i] > chunks2[i]:
return 1
elif chunks1[i] < chunks2[i]:
return -1
if len(chunks1) > min_len and isinstance(chunks1[min_len], str):
return -1
if len(chunks2) > min_len and isinstance(chunks2[min_len], str):
return 1
return 0 | Compares two version string, returning {-1|0|1} just as cmp().
(-1: ver1 < ver2, 0: ver1==ver2, 1: ver1 > ver2)
>>> compare("0.1.1", "0.1.2")
-1
>>> compare("0.1.2", "0.1.1")
1
>>> compare("0.1", "0.1.1")
0
>>> compare("0.1.1rc1", "0.1.1a")
1
>>> compare("0.1.1rc1", "0.1.1")
-1 | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/versions.py#L45-L73 | [
"def parse(version):\n # type: (Union[str, unicode]) -> list\n \"\"\" Transform version string into comparable list\n :param version: version string, e.g. 0.11.23rc1\n :return: list of version chunks, e.g. [0, 11, 23, 'rc1']\n\n >>> parse(\"1\")\n [1]\n >>> parse(\"0.0.1\")\n [0, 0, 1]\n >>> parse(\"0.11.23rc1\")\n [0, 11, 23, 'rc1']\n \"\"\"\n chunks = []\n for chunk in re.findall(r\"(\\d+|[A-Za-z]\\w*)\", version):\n try:\n chunk = int(chunk)\n except ValueError:\n pass\n chunks.append(chunk)\n return chunks\n"
] |
import re
def is_alpha(version):
    # type: (Union[str, unicode]) -> bool
    """Tell whether *version* is a pre-release (not a stable release).

    A stable release consists of digits and dots only; anything else
    (letters, suffixes such as "rc1") counts as a pre-release.

    >>> is_alpha("1.0.0")
    False
    >>> is_alpha("1.0rc1")
    True
    """
    stable = re.match(r"^\d+(\.\d+)*$", version.strip())
    return stable is None
def parse(version):
    # type: (Union[str, unicode]) -> list
    """Split a version string into a list of comparable chunks.

    Numeric chunks become ints, alphanumeric suffixes stay strings.

    >>> parse("1")
    [1]
    >>> parse("0.0.1")
    [0, 0, 1]
    >>> parse("0.11.23rc1")
    [0, 11, 23, 'rc1']
    """
    def as_chunk(token):
        # numbers compare numerically, everything else stays a string
        try:
            return int(token)
        except ValueError:
            return token

    return [as_chunk(t) for t in re.findall(r"(\d+|[A-Za-z]\w*)", version)]
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | parse | python | def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain | Extract email from a full address. Example:
'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> parse("John Doe <me+github.com@someorg.com")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address' | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L14-L43 | null |
import os
import pandas as pd
import six
from stutils.decorators import memoize
# Raised by parse() for inputs that are not valid email addresses
# (non-strings, empty/NaN values, or addresses without '@');
# clean() and domain() catch it and return None instead.
class InvalidEmail(ValueError):
pass
def clean(raw_email):
    # type: (six.string_types) -> Optional[str]
    """Normalize a full address to "user@domain".

    Example: 'John Doe <jdoe+github@foo.com>' -> 'jdoe@foo.com'.
    Returns None for anything that is not a valid email address.

    >>> clean(42) is None
    True
    >>> clean("John Doe me@someorg.com")
    'me@someorg.com'
    """
    try:
        uname, addr_domain = parse(raw_email)
    except InvalidEmail:
        return None
    return uname + "@" + addr_domain
def domain(raw_email):
    # type: (Union[str, unicode]) -> Optional[str]
    """Extract the domain part of a raw email address.

    Returns None when the address is invalid.

    >>> domain("John Doe <test@dep.uni.edu>")
    'dep.uni.edu'
    """
    try:
        _, addr_domain = parse(raw_email)
    except InvalidEmail:
        return None
    return addr_domain
@memoize
def university_domains():
    # type: () -> set
    """Load the bundled list of non-.edu university domains.

    Only second-level domains are listed (e.g. cmu.edu, not cs.cmu.edu),
    since many universities use departmental subdomains. Addresses under
    the .edu TLD are treated as universities by default and therefore
    are not included in this list.
    """
    csv_path = os.path.join(
        os.path.dirname(__file__), "email_university_domains.csv")
    with open(csv_path) as csv_file:
        return set(line.strip() for line in csv_file)
@memoize
def public_domains():
    # type: () -> set
    """Load the bundled list of public (free mailbox) email domains.

    >>> 'gmail.com' in public_domains()
    True
    >>> 'qq.com' in public_domains()
    True
    >>> 'jaraco.com' in public_domains()
    False
    """
    csv_path = os.path.join(
        os.path.dirname(__file__), "email_public_domains.csv")
    with open(csv_path) as csv_file:
        return set(line.strip() for line in csv_file)
@memoize
def domain_user_stats():
    # type: () -> pd.Series
    """Load per-domain counts of distinct observed email addresses.

    Reads the bundled email_domain_users.csv (domain -> number of
    distinct users seen on that domain) and drops null index entries.
    """
    csv_path = os.path.join(
        os.path.dirname(__file__), "email_domain_users.csv")
    counts = pd.read_csv(csv_path, header=0, squeeze=True, index_col=0)
    # drop rows whose domain (the index) failed to parse
    return counts[pd.notnull(counts.index)]
@memoize
def commercial_domains():
    # type: () -> set
    """Domains that are neither public, nor university, nor personal.

    A domain counts as commercial when more than one distinct user was
    observed on it and it is not a free mail provider or a university.

    >>> "google.com" in commercial_domains()
    True
    >>> "jaraco.com" in commercial_domains()  # personal
    False
    """
    counts = domain_user_stats()
    # build probe addresses so the bulk checks can classify each domain
    probes = "test@" + pd.Series(counts.index, index=counts.index)
    mask = ~is_public_bulk(probes) & ~is_university_bulk(probes) & (counts > 1)
    return set(counts[mask].index)
def is_university(addr):
    # type: (Union[str, unicode]) -> bool
    """Tell whether *addr* belongs to a university domain.

    Matches .edu addresses (minus public services like england.edu and
    australia.edu), .edu.<tld> addresses (non-US institutions), and
    domains from the bundled university list. Departmental prefixes are
    stripped during matching, so cs.cmu.edu matches cmu.edu.

    >>> is_university("john@cmu.edu")
    True
    >>> is_university("john@gmail.com")
    False
    """
    addr_domain = domain(addr)
    if not addr_domain:
        return False  # invalid address
    parts = addr_domain.split(".")
    if len(parts) < 2:
        return False  # local or malformed domain
    known = university_domains()
    if parts[-1] == "edu" and parts[-2] not in ("england", "australia"):
        return True
    if parts[-2] == "edu":  # e.g. .edu.au
        return True
    # strip leading subdomains until a known domain matches:
    # isri.cs.cmu.edu -> cs.cmu.edu -> cmu.edu
    return any(".".join(parts[i:]) in known for i in range(len(parts) - 1))
def is_public(addr):
    # type: (Union[str, unicode]) -> bool
    """Tell whether *addr* is registered at a free public mail server.

    Invalid addresses are treated as public (anybody can use them).

    >>> is_public("john@cmu.edu")
    False
    >>> is_public("john@gmail.com")
    True
    """
    addr_domain = domain(addr)
    if not addr_domain:
        return True  # anybody can use an invalid email
    parts = addr_domain.rsplit(".", 1)
    if len(parts) < 2:
        return True  # no TLD at all
    if addr_domain.endswith("local"):
        return True
    return addr_domain in public_domains()
def is_commercial(addr):
    """Tell whether *addr* belongs to a known commercial domain.

    >>> is_commercial("test@google.com")
    True
    >>> is_commercial("test@jaraco.com")
    False
    """
    addr_domain = domain(addr)
    # note: returns None (falsy) rather than False for invalid addresses
    return addr_domain and addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
    """Vectorized is_commercial over a pandas Series of addresses."""
    known = commercial_domains()
    return addr_series.map(domain).map(lambda d: d in known)


def is_public_bulk(addr_series):
    """Vectorized is_public over a pandas Series of addresses."""
    return addr_series.map(lambda addr: is_public(addr))


def is_university_bulk(addr_series):
    # type: (pd.Series) -> pd.Series
    """Vectorized is_university over a pandas Series of addresses.

    University subdomains are matched by parts, so each element is
    checked individually; consider caching this call.
    """
    return addr_series.map(lambda addr: is_university(addr))
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | university_domains | python | def university_domains():
# type: () -> set
fpath = os.path.join(
os.path.dirname(__file__), "email_university_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh) | Return list of university domains outside of .edu TLD
NOTE: only 2nd level domain is returned, i.e. for aaa.bbb.uk only bbbl.uk
will be returned. This is necessary since many universities use
departmenntal domains, like cs.cmu.edu or andrew.cmu.edu
NOTE2: .edu domains are not included into this list as they're considered
belonging to universities by default.
How to get the original CSV:
```python
x = requests.get(
"https://raw.githubusercontent.com/Hipo/university-domains-list/"
"master/world_universities_and_domains.json").json()
domains = set(ds for u in x
for ds in u['domains'] if not "edu" in ds.rsplit(".", 2)[-2:])
domains = list(domains)
pd.Series(domains, index=domains, name="domain"
).drop(
["chat.ru"]
).to_csv("email_university_domains.csv", index=False)
``` | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L79-L105 | null |
import os
import pandas as pd
import six
from stutils.decorators import memoize
class InvalidEmail(ValueError):
pass
def parse(raw_email):
    # type: (six.string_types) -> Tuple[six.string_types, six.string_types]
    """Split a raw address into (username, domain).

    'John Doe <jdoe+github@foo.com>' -> ('jdoe', 'foo.com')

    Raises:
        InvalidEmail: when the input is not a string, is empty/NaN,
            or contains no '@'.

    >>> parse("John Doe <me+github.com@someorg.com")
    ('me', 'someorg.com')
    """
    if not isinstance(raw_email, six.string_types):
        raise InvalidEmail("Invalid email: %s" % raw_email)
    if not raw_email or pd.isnull(raw_email):
        raise InvalidEmail("None or NaN is not a valid email address")
    # keep only what is inside <...> when angle brackets are present
    address = raw_email.split("<", 1)[-1].split(">", 1)[0]
    # git-svn can generate addresses with several '@'s, e.g.
    # <rossberg@chromium.org@ce2b1a6d-...>; only the first two parts matter
    parts = address.split("@", 3)
    if len(parts) < 2:
        raise InvalidEmail("Invalid email")
    uname = parts[0].rsplit(" ", 1)[-1]
    addr_domain = parts[1].split(" ", 1)[0]
    # drop "+tag" suffixes such as jdoe+github
    return uname.split("+", 1)[0], addr_domain
def clean(raw_email):
    # type: (six.string_types) -> Optional[str]
    """Normalize a full address to "user@domain".

    Example: 'John Doe <jdoe+github@foo.com>' -> 'jdoe@foo.com'.
    Returns None for anything that is not a valid email address.

    >>> clean(42) is None
    True
    """
    try:
        uname, addr_domain = parse(raw_email)
    except InvalidEmail:
        return None
    return uname + "@" + addr_domain
def domain(raw_email):
    # type: (Union[str, unicode]) -> Optional[str]
    """Extract the domain part of a raw email address.

    Returns None when the address is invalid.

    >>> domain("John Doe <test@dep.uni.edu>")
    'dep.uni.edu'
    """
    try:
        _, addr_domain = parse(raw_email)
    except InvalidEmail:
        return None
    return addr_domain
@memoize
@memoize
def public_domains():
# type: () -> set
""" Return list of public email domains (i.e. offering free mailboxes)
How to get the original CSV:
x = requests.get(
"https://gist.githubusercontent.com/tbrianjones/5992856/raw/"
"87f527af7bdd21997722fa65143a9af7bee92583/"
"free_email_provider_domains.txt").text.split()
# manually coded
x.extend([
'gmail.com', 'users.noreply.github.com', 'hotmail.com',
'googlemail.com', 'users.sourceforge.net', 'iki.fi',
'yahoo.com', 'me.com', 'gmx.de', 'cihar.com', 'ya.ru',
'yandex.ru', 'outlook.com', 'gmx.net', 'web.de', 'pobox.com',
'yahoo.co.uk', 'qq.com', 'free.fr', 'icloud.com', '163.com',
'50mail.com', 'live.com', 'lavabit.com', 'mail.ru', '126.com',
'yahoo.fr', 'seznam.cz'
])
domains = list(set(x)) # make it unique
pd.Series(domains, index=domains, name="domain"
).drop( # mistakenly labeled as public
["unican.es"]
).to_csv("email_public_domains.csv", index=False)
>>> 'gmail.com' in public_domains()
True
>>> '163.com' in public_domains()
True
>>> 'qq.com' in public_domains()
True
>>> 'jaraco.com' in public_domains()
False
"""
fpath = os.path.join(os.path.dirname(__file__), "email_public_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def domain_user_stats():
# type: () -> pd.Series
""" Get number of distinct email addresses in observed domains
TODO: get up to date with new projects layout
How to build email_domain_users.csv:
from collections import defaultdict
import logging
from common import utils as common
import stscraper as scraper
log = logging.getLogger("domain_user_stats")
stats = defaultdict(set)
for ecosystem in common.ECOSYSTEMS:
urls = common.package_urls(ecosystem)
for package_name, url in urls.items():
log.info(package_name)
try:
cs = scraper.commits(url)
except scraper.RepoDoesNotExist:
continue
for email_addr in cs["author_email"].dropna().unique():
if not email_addr or pd.isnull(email_addr):
continue
try:
user, email_domain = clean(email_addr).split("@")
except InvalidEmail:
continue
stats[email_domain].add(user)
s = pd.Series({dm: len(users) for dm, users in stats.items()})
s = s.rename("users").sort_values(ascending=False)
s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
return s
"""
fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
stats = pd.read_csv(fname, header=0, squeeze=True, index_col=0)
return stats[pd.notnull(stats.index)]
@memoize
def commercial_domains():
# type: () -> set
""" Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
"""
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index)
def is_university(addr):
# type: (Union[str, unicode]) -> bool
""" Check if provided email has a university domain
- either in .edu domain
(except public sercices like england.edu or australia.edu)
- or in .edu.TLD (non-US based institutions, like edu.au)
- or listed in a public list of universities
since universities often have department addresses as well, only the end
is matched. E.g. cs.cmu.edu will match cmu.edu
:param addr: email address
:return: bool
>>> is_university("john@cmu.edu")
True
>>> is_university("john@gmail.com")
False
"""
addr_domain = domain(addr)
if not addr_domain: # invalid email
return False
chunks = addr_domain.split(".")
if len(chunks) < 2: # local or invalid address
return False
domains = university_domains()
# many universitites have departmental emails, such as cs.cmu.edu. However,
# the original dataset only has top level domain (cmu.edu). So, what we need
# to do is to strip leading subdomains until match or nothing to strip:
# isri.cs.cmu.edu (no match) -> cs.cmu.edu (no match) -> cmu.edu (match)
return (chunks[-1] == "edu" and chunks[-2] not in ("england", "australia"))\
or chunks[-2] == "edu" \
or any(".".join(chunks[i:]) in domains for i in range(len(chunks)-1))
def is_public(addr):
# type: (Union[str, unicode]) -> bool
""" Check if the passed email registered at a free pubic mail server
:param addr: email address to check
:return: bool
>>> is_public("john@cmu.edu")
False
>>> is_public("john@gmail.com")
True
"""
addr_domain = domain(addr)
if not addr_domain:
# anybody can use invalid email
return True
chunks = addr_domain.rsplit(".", 1)
return len(chunks) < 2 \
or addr_domain.endswith("local") \
or addr_domain in public_domains()
def is_commercial(addr):
"""
>>> is_commercial("test@google.com")
True
>>> is_commercial("test@microsoft.com")
True
>>> is_commercial("test@jaraco.com")
False
"""
addr_domain = domain(addr)
return addr_domain and addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
domains = commercial_domains()
return addr_series.map(domain).map(lambda addr: addr in domains)
def is_public_bulk(addr_series):
return addr_series.map(lambda addr: is_public(addr))
def is_university_bulk(addr_series):
# type: (pd.Series) -> pd.Series
""" Since university subdomains have to be matched by parts, we can't use
bulk_check. Consider caching this call
"""
return addr_series.map(lambda addr: is_university(addr))
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | domain_user_stats | python | def domain_user_stats():
# type: () -> pd.Series
fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
stats = pd.read_csv(fname, header=0, squeeze=True, index_col=0)
return stats[pd.notnull(stats.index)] | Get number of distinct email addresses in observed domains
TODO: get up to date with new projects layout
How to build email_domain_users.csv:
from collections import defaultdict
import logging
from common import utils as common
import stscraper as scraper
log = logging.getLogger("domain_user_stats")
stats = defaultdict(set)
for ecosystem in common.ECOSYSTEMS:
urls = common.package_urls(ecosystem)
for package_name, url in urls.items():
log.info(package_name)
try:
cs = scraper.commits(url)
except scraper.RepoDoesNotExist:
continue
for email_addr in cs["author_email"].dropna().unique():
if not email_addr or pd.isnull(email_addr):
continue
try:
user, email_domain = clean(email_addr).split("@")
except InvalidEmail:
continue
stats[email_domain].add(user)
s = pd.Series({dm: len(users) for dm, users in stats.items()})
s = s.rename("users").sort_values(ascending=False)
s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
return s | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L149-L186 | null |
import os
import pandas as pd
import six
from stutils.decorators import memoize
class InvalidEmail(ValueError):
pass
def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
"""Extract email from a full address. Example:
'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> parse("John Doe <me+github.com@someorg.com")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address'
"""
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain
def clean(raw_email):
# type: (six.string_types) -> Optional[str]
"""Extract email from a full address.
Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> clean(42) is None
True
>>> clean("John Doe me@someorg.com")
'me@someorg.com'
"""
try:
return "%s@%s" % parse(raw_email)
except InvalidEmail:
return None
def domain(raw_email):
    # type: (Union[str, unicode]) -> Optional[str]
    """Return only the domain part of a raw email address,
    or None when the address is invalid.

    >>> domain("John Doe <test@dep.uni.edu>")
    'dep.uni.edu'
    >>> domain("Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>")
    'dep.uni.edu'
    """
    try:
        _username, mail_domain = parse(raw_email)
    except InvalidEmail:
        return None
    return mail_domain
@memoize
def university_domains():
    # type: () -> set
    """Return the set of known university domains outside the .edu TLD.

    NOTE: only the registered (2nd-level) domain is listed, i.e. for
    aaa.bbb.uk only bbb.uk appears.  Universities often use departmental
    subdomains (cs.cmu.edu, andrew.cmu.edu), so callers should match
    domain suffixes (see :func:`is_university`).
    NOTE2: .edu domains are not included in this list since they are
    considered to belong to universities by default.

    The bundled CSV was built from
    https://github.com/Hipo/university-domains-list
    by keeping every listed domain without "edu" in its last two labels
    and dropping the false positive "chat.ru".
    """
    fpath = os.path.join(
        os.path.dirname(__file__), "email_university_domains.csv")
    with open(fpath) as fh:
        return {line.strip() for line in fh}
@memoize
def public_domains():
    # type: () -> set
    """Return the set of public email domains (offering free mailboxes).

    The bundled CSV combines the free_email_provider_domains gist
    (https://gist.github.com/tbrianjones/5992856) with a manually curated
    list of popular providers (gmail.com, hotmail.com, mail.ru, qq.com,
    users.noreply.github.com, etc.), minus "unican.es", which was
    mistakenly labeled as public in the upstream list.

    >>> 'gmail.com' in public_domains()
    True
    >>> '163.com' in public_domains()
    True
    >>> 'qq.com' in public_domains()
    True
    >>> 'jaraco.com' in public_domains()
    False
    """
    fpath = os.path.join(os.path.dirname(__file__), "email_public_domains.csv")
    with open(fpath) as fh:
        return {line.strip() for line in fh}
@memoize  # NOTE: was stacked twice; a single application is sufficient
def commercial_domains():
    # type: () -> set
    """ Return list of commercial email domains, which means:
    - domain is not public
    - domain is not university
    - it is not personal (more than 1 person using this domain)

    >>> "google.com" in commercial_domains()
    True
    >>> "microsoft.com" in commercial_domains()
    True
    >>> "isri.cs.cmu.edu" in commercial_domains()  # university department
    False
    >>> "jaraco.com" in commercial_domains()  # personal
    False
    """
    dus = domain_user_stats()
    # build one synthetic "test@<domain>" address per observed domain so the
    # existing bulk checkers can be reused for classification
    es = "test@" + pd.Series(dus.index, index=dus.index)
    return set(
        dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index)
def is_university(addr):
    # type: (Union[str, unicode]) -> bool
    """ Check if provided email has a university domain
    - either in the .edu domain
      (except public services like england.edu or australia.edu)
    - or in .edu.TLD (non-US based institutions, like edu.au)
    - or listed in a public list of universities

    Since universities often have department addresses as well, only the
    end of the domain is matched, e.g. cs.cmu.edu will match cmu.edu.

    :param addr: email address
    :return: bool

    >>> is_university("john@cmu.edu")
    True
    >>> is_university("john@gmail.com")
    False
    """
    addr_domain = domain(addr)
    if not addr_domain:  # invalid email
        return False
    labels = addr_domain.split(".")
    if len(labels) < 2:  # local or invalid address
        return False
    known = university_domains()
    # Universities commonly use departmental subdomains (isri.cs.cmu.edu)
    # while the dataset only lists top-level domains (cmu.edu), so every
    # proper suffix of the domain is tried against the known list:
    # isri.cs.cmu.edu -> cs.cmu.edu -> cmu.edu
    suffixes = (".".join(labels[i:]) for i in range(len(labels) - 1))
    us_edu = labels[-1] == "edu" and labels[-2] not in ("england", "australia")
    return us_edu \
        or labels[-2] == "edu" \
        or any(sfx in known for sfx in suffixes)
def is_public(addr):
    # type: (Union[str, unicode]) -> bool
    """ Check if the passed email is registered at a free public mail server

    :param addr: email address to check
    :return: bool

    >>> is_public("john@cmu.edu")
    False
    >>> is_public("john@gmail.com")
    True
    """
    addr_domain = domain(addr)
    if not addr_domain:
        # anybody can use invalid email
        return True
    if len(addr_domain.rsplit(".", 1)) < 2:
        # no dot at all - a local or malformed address
        return True
    return addr_domain.endswith("local") \
        or addr_domain in public_domains()
def is_commercial(addr):
    """Check whether the address belongs to a known commercial domain.

    >>> is_commercial("test@google.com")
    True
    >>> is_commercial("test@microsoft.com")
    True
    >>> is_commercial("test@jaraco.com")
    False
    """
    addr_domain = domain(addr)
    if not addr_domain:
        # preserve the original falsy value (None) for invalid addresses
        return addr_domain
    return addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
    """Vectorized :func:`is_commercial` over a pandas Series of addresses."""
    known = commercial_domains()
    return addr_series.map(domain).map(
        lambda addr_domain: addr_domain in known)
def is_public_bulk(addr_series):
    """Vectorized :func:`is_public` over a pandas Series of addresses."""
    return addr_series.map(is_public)
def is_university_bulk(addr_series):
    # type: (pd.Series) -> pd.Series
    """Vectorized :func:`is_university` over a pandas Series of addresses.

    University subdomains have to be matched by parts, so a plain
    set-membership bulk check cannot be used; consider caching this call.
    """
    return addr_series.map(is_university)
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | commercial_domains | python | def commercial_domains():
# type: () -> set
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index) | Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L190-L208 | [
"def is_public_bulk(addr_series):\n return addr_series.map(lambda addr: is_public(addr))\n",
"def is_university_bulk(addr_series):\n # type: (pd.Series) -> pd.Series\n \"\"\" Since university subdomains have to be matched by parts, we can't use\n bulk_check. Consider caching this call\n \"\"\"\n return addr_series.map(lambda addr: is_university(addr))\n"
] |
import os
import pandas as pd
import six
from stutils.decorators import memoize
class InvalidEmail(ValueError):
pass
def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
"""Extract email from a full address. Example:
'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> parse("John Doe <me+github.com@someorg.com")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address'
"""
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain
def clean(raw_email):
# type: (six.string_types) -> Optional[str]
"""Extract email from a full address.
Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> clean(42) is None
True
>>> clean("John Doe me@someorg.com")
'me@someorg.com'
"""
try:
return "%s@%s" % parse(raw_email)
except InvalidEmail:
return None
def domain(raw_email):
# type: (Union[str, unicode]) -> Optional[str]
""" Extract email domain from a raw email address.
Returns None if the address is invalid
>>> domain("John Doe <test@dep.uni.edu>")
'dep.uni.edu'
>>> domain("Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>")
'dep.uni.edu'
"""
try:
return parse(raw_email)[-1]
except InvalidEmail:
return None
@memoize
def university_domains():
# type: () -> set
""" Return list of university domains outside of .edu TLD
NOTE: only 2nd level domain is returned, i.e. for aaa.bbb.uk only bbbl.uk
will be returned. This is necessary since many universities use
departmenntal domains, like cs.cmu.edu or andrew.cmu.edu
NOTE2: .edu domains are not included into this list as they're considered
belonging to universities by default.
How to get the original CSV:
```python
x = requests.get(
"https://raw.githubusercontent.com/Hipo/university-domains-list/"
"master/world_universities_and_domains.json").json()
domains = set(ds for u in x
for ds in u['domains'] if not "edu" in ds.rsplit(".", 2)[-2:])
domains = list(domains)
pd.Series(domains, index=domains, name="domain"
).drop(
["chat.ru"]
).to_csv("email_university_domains.csv", index=False)
```
"""
fpath = os.path.join(
os.path.dirname(__file__), "email_university_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def public_domains():
# type: () -> set
""" Return list of public email domains (i.e. offering free mailboxes)
How to get the original CSV:
x = requests.get(
"https://gist.githubusercontent.com/tbrianjones/5992856/raw/"
"87f527af7bdd21997722fa65143a9af7bee92583/"
"free_email_provider_domains.txt").text.split()
# manually coded
x.extend([
'gmail.com', 'users.noreply.github.com', 'hotmail.com',
'googlemail.com', 'users.sourceforge.net', 'iki.fi',
'yahoo.com', 'me.com', 'gmx.de', 'cihar.com', 'ya.ru',
'yandex.ru', 'outlook.com', 'gmx.net', 'web.de', 'pobox.com',
'yahoo.co.uk', 'qq.com', 'free.fr', 'icloud.com', '163.com',
'50mail.com', 'live.com', 'lavabit.com', 'mail.ru', '126.com',
'yahoo.fr', 'seznam.cz'
])
domains = list(set(x)) # make it unique
pd.Series(domains, index=domains, name="domain"
).drop( # mistakenly labeled as public
["unican.es"]
).to_csv("email_public_domains.csv", index=False)
>>> 'gmail.com' in public_domains()
True
>>> '163.com' in public_domains()
True
>>> 'qq.com' in public_domains()
True
>>> 'jaraco.com' in public_domains()
False
"""
fpath = os.path.join(os.path.dirname(__file__), "email_public_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def domain_user_stats():
    # type: () -> pd.Series
    """ Get number of distinct email addresses in observed domains

    TODO: get up to date with new projects layout

    How to build email_domain_users.csv:

    from collections import defaultdict
    import logging

    from common import utils as common
    import stscraper as scraper

    log = logging.getLogger("domain_user_stats")
    stats = defaultdict(set)
    for ecosystem in common.ECOSYSTEMS:
        urls = common.package_urls(ecosystem)
        for package_name, url in urls.items():
            log.info(package_name)
            try:
                cs = scraper.commits(url)
            except scraper.RepoDoesNotExist:
                continue
            for email_addr in cs["author_email"].dropna().unique():
                if not email_addr or pd.isnull(email_addr):
                    continue
                try:
                    user, email_domain = clean(email_addr).split("@")
                except InvalidEmail:
                    continue
                stats[email_domain].add(user)
    s = pd.Series({dm: len(users) for dm, users in stats.items()})
    s = s.rename("users").sort_values(ascending=False)
    s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
    return s
    """
    fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
    # ``read_csv(..., squeeze=True)`` was deprecated in pandas 1.4 and removed
    # in 2.0; ``DataFrame.squeeze("columns")`` is the portable equivalent and
    # still yields the single data column as a Series.
    stats = pd.read_csv(fname, header=0, index_col=0).squeeze("columns")
    # the CSV may contain an empty/NaN domain row; drop it from the index
    return stats[pd.notnull(stats.index)]
@memoize
def is_university(addr):
# type: (Union[str, unicode]) -> bool
""" Check if provided email has a university domain
- either in .edu domain
(except public sercices like england.edu or australia.edu)
- or in .edu.TLD (non-US based institutions, like edu.au)
- or listed in a public list of universities
since universities often have department addresses as well, only the end
is matched. E.g. cs.cmu.edu will match cmu.edu
:param addr: email address
:return: bool
>>> is_university("john@cmu.edu")
True
>>> is_university("john@gmail.com")
False
"""
addr_domain = domain(addr)
if not addr_domain: # invalid email
return False
chunks = addr_domain.split(".")
if len(chunks) < 2: # local or invalid address
return False
domains = university_domains()
# many universitites have departmental emails, such as cs.cmu.edu. However,
# the original dataset only has top level domain (cmu.edu). So, what we need
# to do is to strip leading subdomains until match or nothing to strip:
# isri.cs.cmu.edu (no match) -> cs.cmu.edu (no match) -> cmu.edu (match)
return (chunks[-1] == "edu" and chunks[-2] not in ("england", "australia"))\
or chunks[-2] == "edu" \
or any(".".join(chunks[i:]) in domains for i in range(len(chunks)-1))
def is_public(addr):
# type: (Union[str, unicode]) -> bool
""" Check if the passed email registered at a free pubic mail server
:param addr: email address to check
:return: bool
>>> is_public("john@cmu.edu")
False
>>> is_public("john@gmail.com")
True
"""
addr_domain = domain(addr)
if not addr_domain:
# anybody can use invalid email
return True
chunks = addr_domain.rsplit(".", 1)
return len(chunks) < 2 \
or addr_domain.endswith("local") \
or addr_domain in public_domains()
def is_commercial(addr):
"""
>>> is_commercial("test@google.com")
True
>>> is_commercial("test@microsoft.com")
True
>>> is_commercial("test@jaraco.com")
False
"""
addr_domain = domain(addr)
return addr_domain and addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
domains = commercial_domains()
return addr_series.map(domain).map(lambda addr: addr in domains)
def is_public_bulk(addr_series):
return addr_series.map(lambda addr: is_public(addr))
def is_university_bulk(addr_series):
# type: (pd.Series) -> pd.Series
""" Since university subdomains have to be matched by parts, we can't use
bulk_check. Consider caching this call
"""
return addr_series.map(lambda addr: is_university(addr))
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | is_university | python | def is_university(addr):
# type: (Union[str, unicode]) -> bool
addr_domain = domain(addr)
if not addr_domain: # invalid email
return False
chunks = addr_domain.split(".")
if len(chunks) < 2: # local or invalid address
return False
domains = university_domains()
# many universitites have departmental emails, such as cs.cmu.edu. However,
# the original dataset only has top level domain (cmu.edu). So, what we need
# to do is to strip leading subdomains until match or nothing to strip:
# isri.cs.cmu.edu (no match) -> cs.cmu.edu (no match) -> cmu.edu (match)
return (chunks[-1] == "edu" and chunks[-2] not in ("england", "australia"))\
or chunks[-2] == "edu" \
or any(".".join(chunks[i:]) in domains for i in range(len(chunks)-1)) | Check if provided email has a university domain
- either in .edu domain
(except public services like england.edu or australia.edu)
- or in .edu.TLD (non-US based institutions, like edu.au)
- or listed in a public list of universities
since universities often have department addresses as well, only the end
is matched. E.g. cs.cmu.edu will match cmu.edu
:param addr: email address
:return: bool
>>> is_university("john@cmu.edu")
True
>>> is_university("john@gmail.com")
False | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L211-L243 | [
"def domain(raw_email):\n # type: (Union[str, unicode]) -> Optional[str]\n \"\"\" Extract email domain from a raw email address.\n Returns None if the address is invalid\n\n >>> domain(\"John Doe <test@dep.uni.edu>\")\n 'dep.uni.edu'\n >>> domain(\"Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>\")\n 'dep.uni.edu'\n \"\"\"\n try:\n return parse(raw_email)[-1]\n except InvalidEmail:\n return None\n"
] |
import os
import pandas as pd
import six
from stutils.decorators import memoize
class InvalidEmail(ValueError):
pass
def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
"""Extract email from a full address. Example:
'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> parse("John Doe <me+github.com@someorg.com")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address'
"""
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain
def clean(raw_email):
# type: (six.string_types) -> Optional[str]
"""Extract email from a full address.
Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> clean(42) is None
True
>>> clean("John Doe me@someorg.com")
'me@someorg.com'
"""
try:
return "%s@%s" % parse(raw_email)
except InvalidEmail:
return None
def domain(raw_email):
# type: (Union[str, unicode]) -> Optional[str]
""" Extract email domain from a raw email address.
Returns None if the address is invalid
>>> domain("John Doe <test@dep.uni.edu>")
'dep.uni.edu'
>>> domain("Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>")
'dep.uni.edu'
"""
try:
return parse(raw_email)[-1]
except InvalidEmail:
return None
@memoize
def university_domains():
# type: () -> set
""" Return list of university domains outside of .edu TLD
NOTE: only 2nd level domain is returned, i.e. for aaa.bbb.uk only bbbl.uk
will be returned. This is necessary since many universities use
departmenntal domains, like cs.cmu.edu or andrew.cmu.edu
NOTE2: .edu domains are not included into this list as they're considered
belonging to universities by default.
How to get the original CSV:
```python
x = requests.get(
"https://raw.githubusercontent.com/Hipo/university-domains-list/"
"master/world_universities_and_domains.json").json()
domains = set(ds for u in x
for ds in u['domains'] if not "edu" in ds.rsplit(".", 2)[-2:])
domains = list(domains)
pd.Series(domains, index=domains, name="domain"
).drop(
["chat.ru"]
).to_csv("email_university_domains.csv", index=False)
```
"""
fpath = os.path.join(
os.path.dirname(__file__), "email_university_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def public_domains():
# type: () -> set
""" Return list of public email domains (i.e. offering free mailboxes)
How to get the original CSV:
x = requests.get(
"https://gist.githubusercontent.com/tbrianjones/5992856/raw/"
"87f527af7bdd21997722fa65143a9af7bee92583/"
"free_email_provider_domains.txt").text.split()
# manually coded
x.extend([
'gmail.com', 'users.noreply.github.com', 'hotmail.com',
'googlemail.com', 'users.sourceforge.net', 'iki.fi',
'yahoo.com', 'me.com', 'gmx.de', 'cihar.com', 'ya.ru',
'yandex.ru', 'outlook.com', 'gmx.net', 'web.de', 'pobox.com',
'yahoo.co.uk', 'qq.com', 'free.fr', 'icloud.com', '163.com',
'50mail.com', 'live.com', 'lavabit.com', 'mail.ru', '126.com',
'yahoo.fr', 'seznam.cz'
])
domains = list(set(x)) # make it unique
pd.Series(domains, index=domains, name="domain"
).drop( # mistakenly labeled as public
["unican.es"]
).to_csv("email_public_domains.csv", index=False)
>>> 'gmail.com' in public_domains()
True
>>> '163.com' in public_domains()
True
>>> 'qq.com' in public_domains()
True
>>> 'jaraco.com' in public_domains()
False
"""
fpath = os.path.join(os.path.dirname(__file__), "email_public_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def domain_user_stats():
# type: () -> pd.Series
""" Get number of distinct email addresses in observed domains
TODO: get up to date with new projects layout
How to build email_domain_users.csv:
from collections import defaultdict
import logging
from common import utils as common
import stscraper as scraper
log = logging.getLogger("domain_user_stats")
stats = defaultdict(set)
for ecosystem in common.ECOSYSTEMS:
urls = common.package_urls(ecosystem)
for package_name, url in urls.items():
log.info(package_name)
try:
cs = scraper.commits(url)
except scraper.RepoDoesNotExist:
continue
for email_addr in cs["author_email"].dropna().unique():
if not email_addr or pd.isnull(email_addr):
continue
try:
user, email_domain = clean(email_addr).split("@")
except InvalidEmail:
continue
stats[email_domain].add(user)
s = pd.Series({dm: len(users) for dm, users in stats.items()})
s = s.rename("users").sort_values(ascending=False)
s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
return s
"""
fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
stats = pd.read_csv(fname, header=0, squeeze=True, index_col=0)
return stats[pd.notnull(stats.index)]
@memoize
def commercial_domains():
# type: () -> set
""" Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
"""
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index)
def is_public(addr):
# type: (Union[str, unicode]) -> bool
""" Check if the passed email registered at a free pubic mail server
:param addr: email address to check
:return: bool
>>> is_public("john@cmu.edu")
False
>>> is_public("john@gmail.com")
True
"""
addr_domain = domain(addr)
if not addr_domain:
# anybody can use invalid email
return True
chunks = addr_domain.rsplit(".", 1)
return len(chunks) < 2 \
or addr_domain.endswith("local") \
or addr_domain in public_domains()
def is_commercial(addr):
"""
>>> is_commercial("test@google.com")
True
>>> is_commercial("test@microsoft.com")
True
>>> is_commercial("test@jaraco.com")
False
"""
addr_domain = domain(addr)
return addr_domain and addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
domains = commercial_domains()
return addr_series.map(domain).map(lambda addr: addr in domains)
def is_public_bulk(addr_series):
return addr_series.map(lambda addr: is_public(addr))
def is_university_bulk(addr_series):
# type: (pd.Series) -> pd.Series
""" Since university subdomains have to be matched by parts, we can't use
bulk_check. Consider caching this call
"""
return addr_series.map(lambda addr: is_university(addr))
|
CMUSTRUDEL/strudel.utils | stutils/email_utils.py | is_public | python | def is_public(addr):
# type: (Union[str, unicode]) -> bool
addr_domain = domain(addr)
if not addr_domain:
# anybody can use invalid email
return True
chunks = addr_domain.rsplit(".", 1)
return len(chunks) < 2 \
or addr_domain.endswith("local") \
or addr_domain in public_domains() | Check if the passed email registered at a free pubic mail server
:param addr: email address to check
:return: bool
>>> is_public("john@cmu.edu")
False
>>> is_public("john@gmail.com")
True | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/email_utils.py#L246-L265 | [
"def domain(raw_email):\n # type: (Union[str, unicode]) -> Optional[str]\n \"\"\" Extract email domain from a raw email address.\n Returns None if the address is invalid\n\n >>> domain(\"John Doe <test@dep.uni.edu>\")\n 'dep.uni.edu'\n >>> domain(\"Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>\")\n 'dep.uni.edu'\n \"\"\"\n try:\n return parse(raw_email)[-1]\n except InvalidEmail:\n return None\n"
] |
import os
import pandas as pd
import six
from stutils.decorators import memoize
class InvalidEmail(ValueError):
pass
def parse(raw_email):
# type: (six.string_types) -> Tuple[six.string_types, six.string_types]
"""Extract email from a full address. Example:
'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> parse("John Doe <me+github.com@someorg.com")
('me', 'someorg.com')
>>> parse(42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'Invalid email: 42'
>>> parse(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InvalidEmail: 'None or NaN is not a valid email address'
"""
if not isinstance(raw_email, six.string_types):
raise InvalidEmail("Invalid email: %s" % raw_email)
if not raw_email or pd.isnull(raw_email):
raise InvalidEmail("None or NaN is not a valid email address")
email = raw_email.split("<", 1)[-1].split(">", 1)[0]
chunks = email.split("@", 3)
# git-svn generates emails with several @, e.g.:
# <rossberg@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
if len(chunks) < 2:
raise InvalidEmail("Invalid email")
uname = chunks[0].rsplit(" ", 1)[-1]
addr_domain = chunks[1].split(" ", 1)[0]
return uname.split("+", 1)[0], addr_domain
def clean(raw_email):
# type: (six.string_types) -> Optional[str]
"""Extract email from a full address.
Example: 'John Doe <jdoe+github@foo.com>' -> jdoe@foo.com
>>> clean(42) is None
True
>>> clean("John Doe me@someorg.com")
'me@someorg.com'
"""
try:
return "%s@%s" % parse(raw_email)
except InvalidEmail:
return None
def domain(raw_email):
# type: (Union[str, unicode]) -> Optional[str]
""" Extract email domain from a raw email address.
Returns None if the address is invalid
>>> domain("John Doe <test@dep.uni.edu>")
'dep.uni.edu'
>>> domain("Missing test@dep.uni.edu@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>")
'dep.uni.edu'
"""
try:
return parse(raw_email)[-1]
except InvalidEmail:
return None
@memoize
def university_domains():
# type: () -> set
""" Return list of university domains outside of .edu TLD
NOTE: only 2nd level domain is returned, i.e. for aaa.bbb.uk only bbbl.uk
will be returned. This is necessary since many universities use
departmenntal domains, like cs.cmu.edu or andrew.cmu.edu
NOTE2: .edu domains are not included into this list as they're considered
belonging to universities by default.
How to get the original CSV:
```python
x = requests.get(
"https://raw.githubusercontent.com/Hipo/university-domains-list/"
"master/world_universities_and_domains.json").json()
domains = set(ds for u in x
for ds in u['domains'] if not "edu" in ds.rsplit(".", 2)[-2:])
domains = list(domains)
pd.Series(domains, index=domains, name="domain"
).drop(
["chat.ru"]
).to_csv("email_university_domains.csv", index=False)
```
"""
fpath = os.path.join(
os.path.dirname(__file__), "email_university_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def public_domains():
# type: () -> set
""" Return list of public email domains (i.e. offering free mailboxes)
How to get the original CSV:
x = requests.get(
"https://gist.githubusercontent.com/tbrianjones/5992856/raw/"
"87f527af7bdd21997722fa65143a9af7bee92583/"
"free_email_provider_domains.txt").text.split()
# manually coded
x.extend([
'gmail.com', 'users.noreply.github.com', 'hotmail.com',
'googlemail.com', 'users.sourceforge.net', 'iki.fi',
'yahoo.com', 'me.com', 'gmx.de', 'cihar.com', 'ya.ru',
'yandex.ru', 'outlook.com', 'gmx.net', 'web.de', 'pobox.com',
'yahoo.co.uk', 'qq.com', 'free.fr', 'icloud.com', '163.com',
'50mail.com', 'live.com', 'lavabit.com', 'mail.ru', '126.com',
'yahoo.fr', 'seznam.cz'
])
domains = list(set(x)) # make it unique
pd.Series(domains, index=domains, name="domain"
).drop( # mistakenly labeled as public
["unican.es"]
).to_csv("email_public_domains.csv", index=False)
>>> 'gmail.com' in public_domains()
True
>>> '163.com' in public_domains()
True
>>> 'qq.com' in public_domains()
True
>>> 'jaraco.com' in public_domains()
False
"""
fpath = os.path.join(os.path.dirname(__file__), "email_public_domains.csv")
with open(fpath) as fh:
return set(addr_domain.strip() for addr_domain in fh)
@memoize
def domain_user_stats():
# type: () -> pd.Series
""" Get number of distinct email addresses in observed domains
TODO: get up to date with new projects layout
How to build email_domain_users.csv:
from collections import defaultdict
import logging
from common import utils as common
import stscraper as scraper
log = logging.getLogger("domain_user_stats")
stats = defaultdict(set)
for ecosystem in common.ECOSYSTEMS:
urls = common.package_urls(ecosystem)
for package_name, url in urls.items():
log.info(package_name)
try:
cs = scraper.commits(url)
except scraper.RepoDoesNotExist:
continue
for email_addr in cs["author_email"].dropna().unique():
if not email_addr or pd.isnull(email_addr):
continue
try:
user, email_domain = clean(email_addr).split("@")
except InvalidEmail:
continue
stats[email_domain].add(user)
s = pd.Series({dm: len(users) for dm, users in stats.items()})
s = s.rename("users").sort_values(ascending=False)
s.to_csv("common/email_domain_users.csv", encoding="utf8", header=True)
return s
"""
fname = os.path.join(os.path.dirname(__file__), "email_domain_users.csv")
stats = pd.read_csv(fname, header=0, squeeze=True, index_col=0)
return stats[pd.notnull(stats.index)]
@memoize
def commercial_domains():
# type: () -> set
""" Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
"""
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index)
def is_university(addr):
# type: (Union[str, unicode]) -> bool
""" Check if provided email has a university domain
- either in .edu domain
(except public sercices like england.edu or australia.edu)
- or in .edu.TLD (non-US based institutions, like edu.au)
- or listed in a public list of universities
since universities often have department addresses as well, only the end
is matched. E.g. cs.cmu.edu will match cmu.edu
:param addr: email address
:return: bool
>>> is_university("john@cmu.edu")
True
>>> is_university("john@gmail.com")
False
"""
addr_domain = domain(addr)
if not addr_domain: # invalid email
return False
chunks = addr_domain.split(".")
if len(chunks) < 2: # local or invalid address
return False
domains = university_domains()
# many universitites have departmental emails, such as cs.cmu.edu. However,
# the original dataset only has top level domain (cmu.edu). So, what we need
# to do is to strip leading subdomains until match or nothing to strip:
# isri.cs.cmu.edu (no match) -> cs.cmu.edu (no match) -> cmu.edu (match)
return (chunks[-1] == "edu" and chunks[-2] not in ("england", "australia"))\
or chunks[-2] == "edu" \
or any(".".join(chunks[i:]) in domains for i in range(len(chunks)-1))
def is_commercial(addr):
"""
>>> is_commercial("test@google.com")
True
>>> is_commercial("test@microsoft.com")
True
>>> is_commercial("test@jaraco.com")
False
"""
addr_domain = domain(addr)
return addr_domain and addr_domain in commercial_domains()
def is_commercial_bulk(addr_series):
domains = commercial_domains()
return addr_series.map(domain).map(lambda addr: addr in domains)
def is_public_bulk(addr_series):
return addr_series.map(lambda addr: is_public(addr))
def is_university_bulk(addr_series):
# type: (pd.Series) -> pd.Series
""" Since university subdomains have to be matched by parts, we can't use
bulk_check. Consider caching this call
"""
return addr_series.map(lambda addr: is_university(addr))
|
CMUSTRUDEL/strudel.utils | stutils/__init__.py | get_config | python | def get_config(variable, default=None):
if variable in CONFIG:
return CONFIG[variable]
if hasattr(settings, variable):
return getattr(settings, variable)
if variable in os.environ:
return os.environ[variable]
return default | Get configuration variable for strudel.* packages
Args:
variable (str): name of the config variable
default: value to use of config variable not set
Returns:
variable value
Order of search:
1. stutils.CONFIG
2. settings.py of the current folder
3. environment variable
Known config vars so far:
strudel.utils
ST_FS_CACHE_DURATION - duration of filesystem cache in seconds
ST_FS_CACHE_PATH - path to the folder to store filesystem cache
strudel.ecosystems
PYPI_SAVE_PATH - place to store downloaded PyPI packages
PYPI_TIMEOUT - network timeout for PyPI API
strudel.scraper
GITHUB_API_TOKENS - comma separated list of GitHub tokens
GITLAB_API_TOKENS - same for GitLab API | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/__init__.py#L20-L57 | null |
"""
"""
import os
try:
import settings
except ImportError:
settings = object()
__version__ = '0.6.2'
__author__ = "Marat (@cmu.edu)"
__license__ = "GPL v3"
CONFIG = {}
def set_config(variable, value):
""" Set configuration variable globally for all strudel.* packages """
CONFIG[variable] = value
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | fs_cache | python | def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator | A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L113-L137 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs)
def memoize(func):
""" Classic memoize decorator for non-class methods """
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper
def threadpool(num_workers=None):
"""Apply stutils.mapreduce.map to the given function"""
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | typed_fs_cache | python | def typed_fs_cache(app_name, *args, **kwargs):
return functools.partial(fs_cache, app_name, *args, **kwargs) | Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L140-L153 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
"""
A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path
"""
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator
def memoize(func):
""" Classic memoize decorator for non-class methods """
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper
def threadpool(num_workers=None):
"""Apply stutils.mapreduce.map to the given function"""
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | memoize | python | def memoize(func):
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper | Classic memoize decorator for non-class methods | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L156-L166 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
"""
A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path
"""
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator
def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs)
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper
def threadpool(num_workers=None):
"""Apply stutils.mapreduce.map to the given function"""
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | cached_method | python | def cached_method(func):
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper | Memoize for class methods | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L169-L179 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
"""
A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path
"""
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator
def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs)
def memoize(func):
""" Classic memoize decorator for non-class methods """
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper
def threadpool(num_workers=None):
"""Apply stutils.mapreduce.map to the given function"""
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | guard | python | def guard(func):
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper | Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function. | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L241-L257 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
"""
A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path
"""
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator
def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs)
def memoize(func):
""" Classic memoize decorator for non-class methods """
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def threadpool(num_workers=None):
"""Apply stutils.mapreduce.map to the given function"""
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | threadpool | python | def threadpool(num_workers=None):
def decorator(func):
@functools.wraps(func)
def wrapper(data):
return mapreduce.map(func, data, num_workers)
return wrapper
return decorator | Apply stutils.mapreduce.map to the given function | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L260-L267 | null |
import json
import logging
import os
import shutil
import time
import functools
import tempfile
import threading
import pandas as pd
import stutils
from stutils import mapreduce
from stutils.sysutils import mkdir
# filesystem cache decorator defaults
DEFAULT_EXPIRES = stutils.get_config('ST_FS_CACHE_DURATION', 3600 * 24 * 30 * 3)
DEFAULT_PATH = stutils.get_config(
'ST_FS_CACHE_PATH', os.path.join(tempfile.gettempdir(), '.st_fs_cache'))
def _argstring(*args):
"""Convert a list of variables into a single string for naming cache files.
It is used internally by many caching decorators
"""
return "_".join([str(arg).replace("/", ".") for arg in args])
class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
def invalidate_all(self):
""" Remove all files caching this function """
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname))
def fs_cache(app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES, cache_dir='', helper_class=_FSCacher):
"""
A decorator to cache results of functions returning
pd.DataFrame or pd.Series objects under:
<cache_dir>/<app_name>/<cache_type>/<func_name>.<param_string>.csv,
missing parts, like app_name and cache_type, will be omitted
If cache_dir is omitted, stutils 'ST_FS_CACHE_PATH' conf dir will be used.
If 'ST_FS_CACHE_PATH' is not configured, a temporary directory
will be created.
:param app_name: if present, cache files for this application will be
stored in a separate folder
:param idx: number of columns to use as an index
:param cache_type: if present, cache files within app directory will be
separated into different folders by their cache_type.
:param expires: cache duration in seconds
:param cache_dir: set custom file cache path
"""
def decorator(func):
return helper_class(func, cache_dir, app_name, cache_type, idx, expires)
return decorator
def typed_fs_cache(app_name, *args, **kwargs):
""" Convenience method to simplify declaration of multiple @fs_cache
e.g.,
>>> my_fs_cache = typed_fs_cache('myapp_name', expires=86400 * 30)
>>> @my_fs_cache('first_method')
... def some_method(*args, **kwargs):
... pass
>>> @my_fs_cache('second_method')
... def some_other_method(*args, **kwargs):
... pass
"""
return functools.partial(fs_cache, app_name, *args, **kwargs)
def memoize(func):
""" Classic memoize decorator for non-class methods """
cache = {}
@functools.wraps(func)
def wrapper(*args):
key = "__".join(str(arg) for arg in args)
if key not in cache:
cache[key] = func(*args)
return cache[key]
return wrapper
def cached_method(func):
""" Memoize for class methods """
@functools.wraps(func)
def wrapper(self, *args):
if not hasattr(self, "_cache"):
self._cache = {}
key = _argstring((func.__name__,) + args)
if key not in self._cache:
self._cache[key] = func(self, *args)
return self._cache[key]
return wrapper
def cached_property(func):
return property(cached_method(func))
class _FSIterCacher(_FSCacher):
extension = 'json'
def __init__(self, *args, **kwargs):
try:
import ijson.backends.yajl2 as ijson
except ImportError:
raise ImportError("Please install yajl-tools to use this decorator")
self.ijson = ijson
super(_FSIterCacher, self).__init__(*args, **kwargs)
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
with open(cache_fpath, 'rb') as cache_fh:
for item in self.ijson.items(cache_fh, "item"):
yield item
return
# if the iterator is not exhausted, the resulting file
# will contain invalid JSON. So, we write to a tempfile
# and then rename after the iterator is exhausted
cache_fh = tempfile.TemporaryFile()
cache_fh.write("[\n".encode('utf8'))
sep = ""
for item in self.func(*args):
cache_fh.write(sep.encode('utf8'))
sep = ",\n"
cache_fh.write(json.dumps(item).encode('utf8'))
yield item
cache_fh.write("]".encode('utf8'))
cache_fh.flush()
# os.rename will fail if /tmp is mapped to a different device
cache_fh.seek(0, 0)
target_fh = open(cache_fpath, 'wb')
shutil.copyfileobj(cache_fh, target_fh)
target_fh.close()
cache_fh.close()
def cache_iterator(*args, **kwargs):
""" A modification of fs_cache to handle large unstructured iterators
- e.g., a result of a GitHubAPI call
Special cases:
json always saves dict keys as strings, so cached dictionaries aren't
exactly the same as original
in Python2, json instantiates loaded strings as unicode, so cached
result might be slightly different from original
"""
kwargs['helper_class'] = _FSIterCacher
return fs_cache(*args, **kwargs)
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper
|
CMUSTRUDEL/strudel.utils | stutils/decorators.py | _FSCacher.invalidate_all | python | def invalidate_all(self):
for fname in os.listdir(self.cache_path):
if fname.startswith(self.func.__name__ + "."):
os.remove(os.path.join(self.cache_path, fname)) | Remove all files caching this function | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/decorators.py#L106-L110 | null | class _FSCacher(object):
extension = 'csv'
def __init__(self, func, cache_dir='', app_name='', cache_type='', idx=1,
expires=DEFAULT_EXPIRES):
# type: (callable, str, str, str, Union[int, str, list], int) -> None
""" Helper class for @fs_cache internals
"""
self.func = func
functools.update_wrapper(self, func)
self.expires = expires
# Will create a path:
# <cache_dir>/<app_name>/<cache_type>/, omitting missing parts
self.cache_path = mkdir(cache_dir or DEFAULT_PATH,
app_name and app_name + '.cache', cache_type)
if isinstance(idx, int):
# Python3 range objects won't work, so explicitly convert to list
idx = list(range(idx))
self.idx = idx
def get_cache_fname(self, *args):
chunks = [self.func.__name__]
if args:
chunks.append(_argstring(*args))
chunks.append(self.extension)
return os.path.join(self.cache_path, ".".join(chunks))
def _expired(self, cache_fpath):
return not os.path.isfile(cache_fpath) \
or time.time() - os.path.getmtime(cache_fpath) > self.expires
def __call__(self, *args):
cache_fpath = self.get_cache_fname(*args)
if not self._expired(cache_fpath):
return pd.read_csv(cache_fpath, index_col=self.idx,
encoding="utf8", squeeze=True)
res = self.func(*args)
if isinstance(res, pd.DataFrame):
df = res
if len(df.columns) == 1 and self.idx == 1:
logging.warning(
"Single column dataframe is returned by %s.\nSince "
"it will cause inconsistent behavior with @fs_cache "
"decorator, please consider changing result type "
"to pd.Series", self.func.__name__)
if any(not isinstance(cname, str) for cname in df.columns):
logging.warning(
"Some of the dataframe columns aren't strings. "
"This will result in inconsistent naming "
"if read from filesystem cache.")
elif isinstance(res, pd.Series):
# starting pandas 0.25 DataFrame and Series to_csv()
# will become compatible, but at least until 0.30 comes out
# keep this for compatibility
df = pd.DataFrame(res)
else:
raise ValueError("Unsupported result type (pd.DataFrame or "
"pd.Series expected, got %s)" % type(res))
df.to_csv(cache_fpath, float_format="%g", encoding="utf-8")
return res
def expired(self, *args):
return self._expired(self.get_cache_fname(*args))
def cached(self, *args):
return not self.expired(*args)
def invalidate(self, *args):
try:
os.remove(self.get_cache_fname(*args))
except OSError:
return False
return True
|
CMUSTRUDEL/strudel.utils | stutils/mapreduce.py | map | python | def map(func, data, num_workers=None):
# type: (callable, Iterable, Optional[int]) -> Iterable
backend = ThreadPool(n_workers=num_workers)
iterable = None
# pd.Series didn't have .items() until pandas 0.21,
# so iteritems for older versions
for method in ('iterrows', 'iteritems', 'items'):
if hasattr(data, method):
iterable = getattr(data, method)()
break
if iterable is None:
iterable = enumerate(data)
mapped = {}
def collect(key):
def process(res):
mapped[key] = res
return process
for key, value in iterable:
backend.submit(func, key, value, callback=collect(key))
backend.shutdown()
if isinstance(data, pd.DataFrame):
return pd.DataFrame.from_dict(
mapped, orient='index').reindex(data.index)
elif isinstance(data, pd.Series):
return pd.Series(mapped).reindex(data.index)
elif isinstance(data, list):
return [mapped[i] for i in range(len(data))]
else:
# in Python, hash(<int>) := <int>, so guaranteed to be in order for list
# and tuple. For other types
return type(data)(mapped) | Map an iterable using multithreading
>>> s = pd.Series(range(120, 0, -1))
>>> s2 = map(lambda i, x: x ** 3.75, s)
>>> isinstance(s2, type(s))
True
>>> len(s) == len(s2)
True
>>> (s2 == s.map(lambda x: x ** 3.75)).all()
True
>>> s = list(range(120, 0, -1))
>>> s2 = map(lambda i, x: x ** 3.75, s)
>>> isinstance(s2, type(s))
True
>>> len(s) == len(s2)
True
>>> all(x ** 3.75 == s2[i] for i, x in enumerate(s))
True
>>> s = dict(enumerate(range(120, 0, -1)))
>>> s2 = map(lambda i, x: x ** 3.75, s)
>>> isinstance(s2, type(s))
True
>>> len(s) == len(s2)
True
>>> all(x ** 3.75 == s2[i] for i, x in s.items())
True | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/mapreduce.py#L137-L198 | [
"def submit(self, func, *args, **kwargs):\n # submit is executed from the main thread and expected to be synchronous\n callback = None\n if 'callback' in kwargs:\n callback = kwargs.pop('callback')\n assert callable(callback), \"Callback must be callable\"\n\n self.queue.put((func, args, kwargs, callback))\n\n if not self.started:\n self.start()\n",
"def shutdown(self):\n \"\"\"Wait for all threads to complete\"\"\"\n # cleanup\n self.started = False\n try:\n # nice way of doing things - let's wait until all items\n # in the queue are processed\n for t in self._threads:\n t.join()\n finally:\n # Emergency brake - if a KeyboardInterrupt is raised,\n # threads will finish processing current task and exit\n self.stopped = True\n",
"def collect(key):\n def process(res):\n mapped[key] = res\n return process\n"
] |
"""
Alternative approaches:
`asyncio`
- Python 3.4+. Unfortunately, I have to support Python 2, mainly because of
OSCAR project compatibility
`concurrent.futures`
- available since Python 3.2, but there is a backport
https://pypi.org/project/futures/
- backport follows pretty much the same pattern but still needs a wrapper
to support pandas objects. Native version (Python 3.2+) might give
some performance advantages
native `ThreadPool` (`from multiprocessing.pool import ThreadPool`):
- 30..50% slower than this implementation
(see test.TestMapReduce.test_native_threadpool)
- (minor) doesn't support pandas objects
"""
import pandas as pd
import six
import logging
import multiprocessing
import threading
import time
CPU_COUNT = multiprocessing.cpu_count()
class ThreadPool(object):
""" A slightly more performant replacement for native ThreadPool.
States:
started, stopped
False, False: just created, threads don't run yet
probably building up queue of tasks in the main thread
False, True: terminated before start. Shouldn't happen in the wild.
pretty useless state, identical to False, False
True, False: started and processing
True, True: finished processing or terminated
"""
_threads = None
queue = None
started = False
stopped = False
callback_semaphore = None
def __init__(self, n_workers=None):
# the only reason to use threadpool in Python is IO (because of GIL)
# so, we're not really limited with CPU and twice as many threads
# is usually fine as a default
self.n = n_workers or CPU_COUNT * 2
self.queue = six.moves.queue.Queue()
self.callback_semaphore = threading.Lock()
def start(self):
if self.started:
logging.warning("The pool is already started")
return
def worker():
while not self.stopped and (self.started or not self.queue.empty()):
# self.stopped: Terminate immediately
# self.started, queue is not empty: normal processing
# self.started, queue is empty: waiting for main thread
# to submit more tasks
# not self.started, queue is not empty: shutting down gracefully
# not self.started, queue is empty: done, exit
try:
func, args, kwargs, callback = self.queue.get(False)
except six.moves.queue.Empty:
time.sleep(0.1)
continue
else:
logging.debug("Got new data")
try:
result = func(*args, **kwargs)
except Exception as e:
logging.exception(e)
else:
logging.debug(
"Processed data: %s -> %s", str(args), str(result))
self.callback_semaphore.acquire()
try:
callback(result)
except Exception as e:
logging.exception(e)
finally:
self.callback_semaphore.release()
self._threads = [threading.Thread(target=worker) for _ in range(self.n)]
self.started = True
[t.start() for t in self._threads]
def submit(self, func, *args, **kwargs):
# submit is executed from the main thread and expected to be synchronous
callback = None
if 'callback' in kwargs:
callback = kwargs.pop('callback')
assert callable(callback), "Callback must be callable"
self.queue.put((func, args, kwargs, callback))
if not self.started:
self.start()
def shutdown(self):
"""Wait for all threads to complete"""
# cleanup
self.started = False
try:
# nice way of doing things - let's wait until all items
# in the queue are processed
for t in self._threads:
t.join()
finally:
# Emergency brake - if a KeyboardInterrupt is raised,
# threads will finish processing current task and exit
self.stopped = True
def __del__(self):
self.shutdown()
|
CMUSTRUDEL/strudel.utils | stutils/mapreduce.py | ThreadPool.shutdown | python | def shutdown(self):
# cleanup
self.started = False
try:
# nice way of doing things - let's wait until all items
# in the queue are processed
for t in self._threads:
t.join()
finally:
# Emergency brake - if a KeyboardInterrupt is raised,
# threads will finish processing current task and exit
self.stopped = True | Wait for all threads to complete | train | https://github.com/CMUSTRUDEL/strudel.utils/blob/888ef72fcdb851b5873092bc9c4d6958733691f2/stutils/mapreduce.py#L119-L131 | null | class ThreadPool(object):
""" A slightly more performant replacement for native ThreadPool.
States:
started, stopped
False, False: just created, threads don't run yet
probably building up queue of tasks in the main thread
False, True: terminated before start. Shouldn't happen in the wild.
pretty useless state, identical to False, False
True, False: started and processing
True, True: finished processing or terminated
"""
_threads = None
queue = None
started = False
stopped = False
callback_semaphore = None
def __init__(self, n_workers=None):
# the only reason to use threadpool in Python is IO (because of GIL)
# so, we're not really limited with CPU and twice as many threads
# is usually fine as a default
self.n = n_workers or CPU_COUNT * 2
self.queue = six.moves.queue.Queue()
self.callback_semaphore = threading.Lock()
def start(self):
if self.started:
logging.warning("The pool is already started")
return
def worker():
while not self.stopped and (self.started or not self.queue.empty()):
# self.stopped: Terminate immediately
# self.started, queue is not empty: normal processing
# self.started, queue is empty: waiting for main thread
# to submit more tasks
# not self.started, queue is not empty: shutting down gracefully
# not self.started, queue is empty: done, exit
try:
func, args, kwargs, callback = self.queue.get(False)
except six.moves.queue.Empty:
time.sleep(0.1)
continue
else:
logging.debug("Got new data")
try:
result = func(*args, **kwargs)
except Exception as e:
logging.exception(e)
else:
logging.debug(
"Processed data: %s -> %s", str(args), str(result))
self.callback_semaphore.acquire()
try:
callback(result)
except Exception as e:
logging.exception(e)
finally:
self.callback_semaphore.release()
self._threads = [threading.Thread(target=worker) for _ in range(self.n)]
self.started = True
[t.start() for t in self._threads]
def submit(self, func, *args, **kwargs):
# submit is executed from the main thread and expected to be synchronous
callback = None
if 'callback' in kwargs:
callback = kwargs.pop('callback')
assert callable(callback), "Callback must be callable"
self.queue.put((func, args, kwargs, callback))
if not self.started:
self.start()
def __del__(self):
self.shutdown()
|
cstatz/maui | maui/backend/index.py | InverseIndexMapper.slice_local_to_global | python | def slice_local_to_global(self, index_slice, axis=0):
local_start = self.int_local_to_global_start(index_slice.start, axis)
local_stop = self.int_local_to_global_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step) | Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L31-L42 | [
"def int_local_to_global_start(self, index, axis=0):\n \"\"\" Calculate local index from global index from start_index\n\n :param index: global index as integer\n :param axis: current axis to process\n :return:\n \"\"\"\n\n return index + self.__mask[axis].start\n",
"def int_local_to_global_stop(self, index, axis=0):\n \"\"\" Calculate local index from global index from stop_index\n\n :param index: global index as integer\n :param axis: current axis to process\n :return:\n \"\"\"\n\n return index + self.__mask[axis].start\n"
] | class InverseIndexMapper(object):
""" class to map local indexes to global ones """
def __init__(self, mask, halos=None):
""" Constructor of class InverseIndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__mask = mask
def __getitem__(self, index):
return self.local_to_global(index)
def local_to_global(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_local_to_global(index)
elif type(index) is slice:
return self.slice_local_to_global(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_local_to_global(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_local_to_global(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type')
def int_local_to_global_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
def int_local_to_global_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
def int_local_to_global(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | InverseIndexMapper.local_to_global | python | def local_to_global(self, index):
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_local_to_global(index)
elif type(index) is slice:
return self.slice_local_to_global(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_local_to_global(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_local_to_global(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type') | Calculate local index from global index
:param index: input index
:return: local index for data | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L45-L87 | [
"def slice_local_to_global(self, index_slice, axis=0):\n \"\"\" Calculate start and stop index for mapping sliced index\n\n :param index_slice: sliced index?\n :param axis: current axis to calculate\n :return: slice object as calculated\n \"\"\"\n\n local_start = self.int_local_to_global_start(index_slice.start, axis)\n local_stop = self.int_local_to_global_stop(index_slice.stop, axis)\n\n return slice(local_start,local_stop,index_slice.step)\n",
"def int_local_to_global(self, index, axis=0):\n \"\"\" Calculate local index from global index for integer input\n\n :param index: global index as integer\n :param axis: current axis to process\n :return:\n \"\"\"\n\n return index + self.__mask[axis].start\n"
] | class InverseIndexMapper(object):
""" class to map local indexes to global ones """
def __init__(self, mask, halos=None):
""" Constructor of class InverseIndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__mask = mask
def __getitem__(self, index):
return self.local_to_global(index)
def slice_local_to_global(self, index_slice, axis=0):
""" Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated
"""
local_start = self.int_local_to_global_start(index_slice.start, axis)
local_stop = self.int_local_to_global_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step)
def int_local_to_global_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
def int_local_to_global_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
def int_local_to_global(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
return index + self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexMapper.slice_global_to_local | python | def slice_global_to_local(self, index_slice, axis=0):
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step) | Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L139-L155 | null | class IndexMapper(object):
""" class to map global indexes to local ones """
def __init__(self, mask, halos=None):
""" Constructor of class IndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__halos = halos
self.__mask = mask
def __getitem__(self, index):
return self.global_to_local(index)
def global_to_local(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type')
def int_global_to_local_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start
def int_global_to_local_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start
def int_global_to_local(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
# Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexMapper.global_to_local | python | def global_to_local(self, index):
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type') | Calculate local index from global index
:param index: input index
:return: local index for data | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L158-L202 | [
"def slice_global_to_local(self, index_slice, axis=0):\n \"\"\" Calculate start and stop index for mapping sliced index\n\n :param index_slice: sliced index?\n :param axis: current axis to calculate\n :return: slice object as calculated\n \"\"\"\n if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:\n return None\n\n if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:\n return None\n\n local_start = self.int_global_to_local_start(index_slice.start, axis)\n local_stop = self.int_global_to_local_stop(index_slice.stop, axis)\n\n return slice(local_start,local_stop,index_slice.step)\n",
"def int_global_to_local(self, index, axis=0):\n \"\"\" Calculate local index from global index for integer input\n\n :param index: global index as integer\n :param axis: current axis to process\n :return:\n \"\"\"\n\n # Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!\n if index >= self.__mask[axis].stop-self.__halos[1][axis]:\n return None\n\n if index < self.__mask[axis].start+self.__halos[0][axis]:\n return None\n\n return index-self.__mask[axis].start\n"
] | class IndexMapper(object):
""" class to map global indexes to local ones """
def __init__(self, mask, halos=None):
""" Constructor of class IndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__halos = halos
self.__mask = mask
def __getitem__(self, index):
return self.global_to_local(index)
def slice_global_to_local(self, index_slice, axis=0):
""" Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated
"""
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step)
def int_global_to_local_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start
def int_global_to_local_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start
def int_global_to_local(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
# Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexMapper.int_global_to_local_start | python | def int_global_to_local_start(self, index, axis=0):
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start | Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return: | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L204-L217 | null | class IndexMapper(object):
""" class to map global indexes to local ones """
def __init__(self, mask, halos=None):
""" Constructor of class IndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__halos = halos
self.__mask = mask
def __getitem__(self, index):
return self.global_to_local(index)
def slice_global_to_local(self, index_slice, axis=0):
""" Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated
"""
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step)
def global_to_local(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type')
def int_global_to_local_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start
def int_global_to_local(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
# Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexMapper.int_global_to_local_stop | python | def int_global_to_local_stop(self, index, axis=0):
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start | Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return: | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L219-L232 | null | class IndexMapper(object):
""" class to map global indexes to local ones """
def __init__(self, mask, halos=None):
""" Constructor of class IndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__halos = halos
self.__mask = mask
def __getitem__(self, index):
return self.global_to_local(index)
def slice_global_to_local(self, index_slice, axis=0):
""" Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated
"""
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step)
def global_to_local(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type')
def int_global_to_local_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start
def int_global_to_local(self, index, axis=0):
""" Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return:
"""
# Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexMapper.int_global_to_local | python | def int_global_to_local(self, index, axis=0):
# Warum >= an dieser Stelle. Eigentlich sollte > ausreichend sein! Test!
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
return index-self.__mask[axis].start | Calculate local index from global index for integer input
:param index: global index as integer
:param axis: current axis to process
:return: | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L234-L249 | null | class IndexMapper(object):
""" class to map global indexes to local ones """
def __init__(self, mask, halos=None):
""" Constructor of class IndexMapper
:param start_index: local start index as tuple (x1,y1,z1) or slice
:param stop_index: local stop index as tuple (x2,y2,z3) or slice
"""
if halos is None:
tmp = tuple([0 for _ in range(len(mask))])
halos = (tmp, tmp)
self.__halos = halos
self.__mask = mask
def __getitem__(self, index):
return self.global_to_local(index)
def slice_global_to_local(self, index_slice, axis=0):
""" Calculate start and stop index for mapping sliced index
:param index_slice: sliced index?
:param axis: current axis to calculate
:return: slice object as calculated
"""
if index_slice.stop < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index_slice.start > self.__mask[axis].stop-self.__halos[1][axis]:
return None
local_start = self.int_global_to_local_start(index_slice.start, axis)
local_stop = self.int_global_to_local_stop(index_slice.stop, axis)
return slice(local_start,local_stop,index_slice.step)
def global_to_local(self, index):
""" Calculate local index from global index
:param index: input index
:return: local index for data
"""
if (type(index) is int) or (type(index) is slice):
if len(self.__mask) > 1:
raise IndexError('check length of parameter index')
# 1D array
if type(index) is int:
return self.int_global_to_local(index)
elif type(index) is slice:
return self.slice_global_to_local(index)
else:
raise IndexError('check data type of index to be integer or slice')
elif type(index) is tuple:
#if len(index) is not len(self.__mask):
# raise IndexError('check length of parameter index')
local_index = []
for k, item in enumerate(index):
if k < len(self.__mask):
if type(item) is slice:
temp_index = self.slice_global_to_local(item, k)
elif type(item) in [int, np.int64, np.int32]:
temp_index = self.int_global_to_local(item, k)
if temp_index is None:
return temp_index
else:
temp_index = item
local_index.append(temp_index)
return tuple(local_index)
else:
raise IndexError('check index for correct length and type')
def int_global_to_local_start(self, index, axis=0):
""" Calculate local index from global index from start_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index >= self.__mask[axis].stop-self.__halos[1][axis]:
return None
if index < self.__mask[axis].start:
return 0
return index-self.__mask[axis].start
def int_global_to_local_stop(self, index, axis=0):
""" Calculate local index from global index from stop_index
:param index: global index as integer
:param axis: current axis to process
:return:
"""
if index < self.__mask[axis].start+self.__halos[0][axis]:
return None
if index > self.__mask[axis].stop:
return self.__mask[axis].stop-self.__mask[axis].start
return index-self.__mask[axis].start
|
cstatz/maui | maui/backend/index.py | IndexBoundsHandler.int_out_of_bounds | python | def int_out_of_bounds(self, index, axis=0):
#if index >= self._global_shape[axis]:
if index > self._global_shape[axis]:
raise IndexError('index is larger than the upper bound')
# wrap around index if negative like in python
if index < 0:
index += self._global_shape[axis]
#warnings.warn('wrap around may occur')
# check for invalid wrap around
if index < 0:
raise IndexError('index is smaller than the lower bound')
return index | Checks if index is out of local processing bounds
function is used to perform checks for index of type integer
:param index: global index to check, as type int
:param axis: current axis to check
:return: return input or raise error | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L264-L285 | null | class IndexBoundsHandler(object):
""" class to handle bound errors in mpi wrapper """
def __init__(self, global_shape):
""" Constructor of IndexBoundsHandler
:param global_shape: global shape of data where to check bounds
"""
self._global_shape = global_shape
def __getitem__(self, index):
return self.out_of_bounds(index)
def slice_out_of_bounds(self, index, axis=0):
start = index.start
stop = index.stop
if start is None:
start = 0
if stop is None:
stop = self._global_shape[axis]
# stop-=1
index_start = self.int_out_of_bounds(start, axis)
index_stop = self.int_out_of_bounds(stop, axis)
return slice(index_start, index_stop, index.step)
def out_of_bounds(self, index):
""" Check index for out of bounds
:param index: index as integer, tuple or slice
:return: local index as tuple
"""
if type(index) is int:
return self.int_out_of_bounds(index)
elif type(index) is slice:
return self.slice_out_of_bounds(index)
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if type(item) is slice:
temp_index = self.slice_out_of_bounds(item, k)
elif type(item) is int:
temp_index = self.int_out_of_bounds(item, k)
# FIXME: will fail if item is no int or slice!
if temp_index is None:
return temp_index
local_index.append(temp_index)
return tuple(local_index)
|
cstatz/maui | maui/backend/index.py | IndexBoundsHandler.out_of_bounds | python | def out_of_bounds(self, index):
if type(index) is int:
return self.int_out_of_bounds(index)
elif type(index) is slice:
return self.slice_out_of_bounds(index)
elif type(index) is tuple:
local_index = []
for k, item in enumerate(index):
if type(item) is slice:
temp_index = self.slice_out_of_bounds(item, k)
elif type(item) is int:
temp_index = self.int_out_of_bounds(item, k)
# FIXME: will fail if item is no int or slice!
if temp_index is None:
return temp_index
local_index.append(temp_index)
return tuple(local_index) | Check index for out of bounds
:param index: index as integer, tuple or slice
:return: local index as tuple | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/backend/index.py#L304-L333 | [
"def int_out_of_bounds(self, index, axis=0):\n \"\"\" examples if index is out of local processing bounds\n\n function is used to perform examples for index of type integer\n :param index: global index to examples as type int\n :param axis: current axis to examples\n :return: return input or raise error\n \"\"\"\n #if index >= self._global_shape[axis]:\n if index > self._global_shape[axis]:\n raise IndexError('index is larger than the upper bound')\n\n # wrap around index if negative like in python\n if index < 0:\n index += self._global_shape[axis]\n #warnings.warn('warp around may occur')\n\n # check for invalid wrap around\n if index < 0:\n raise IndexError('index is smaller than the lower bound')\n\n return index\n",
"def slice_out_of_bounds(self, index, axis=0):\n start = index.start\n stop = index.stop\n\n if start is None:\n start = 0\n\n if stop is None:\n stop = self._global_shape[axis]\n\n # stop-=1\n\n index_start = self.int_out_of_bounds(start, axis)\n index_stop = self.int_out_of_bounds(stop, axis)\n\n return slice(index_start, index_stop, index.step)\n"
] | class IndexBoundsHandler(object):
""" class to handle bound errors in mpi wrapper """
def __init__(self, global_shape):
""" Constructor of IndexBoundsHandler
:param global_shape: global shape of data where to check bounds
"""
self._global_shape = global_shape
def __getitem__(self, index):
return self.out_of_bounds(index)
def int_out_of_bounds(self, index, axis=0):
""" Checks if index is out of local processing bounds
function is used to perform checks for index of type integer
:param index: global index to check, as type int
:param axis: current axis to check
:return: return input or raise error
"""
#if index >= self._global_shape[axis]:
if index > self._global_shape[axis]:
raise IndexError('index is larger than the upper bound')
# wrap around index if negative like in python
if index < 0:
index += self._global_shape[axis]
#warnings.warn('wrap around may occur')
# check for invalid wrap around
if index < 0:
raise IndexError('index is smaller than the lower bound')
return index
def slice_out_of_bounds(self, index, axis=0):
start = index.start
stop = index.stop
if start is None:
start = 0
if stop is None:
stop = self._global_shape[axis]
# stop-=1
index_start = self.int_out_of_bounds(start, axis)
index_stop = self.int_out_of_bounds(stop, axis)
return slice(index_start, index_stop, index.step)
|
cstatz/maui | maui/field/view.py | View.__math | python | def __math(self, f, x):
d = {}
#operation with single number
if isinstance(x, (int, long, float, complex)):
for i in self.d: d[i] = f(self.d[i], x)
return d
#operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
elif isinstance(x, View) or isinstance(x, Field):
try:
for i in self.d:
d[i] = f(self.d[i], x.d[i])
return d
except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
#operation with numpy array
elif isinstance(x, ndarray):
#array has to be of the same Size as View
try:
for i in self.d:
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as View for operation')
else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).') | operator function
:param f: operator.add/sub/mul... used operator
:param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
:return: dictionary (same shape as field.d) with result of operation | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L112-L141 | [
"def __indices(self, slice_partition, slice_mask):\n #function to generate Indices for operations with Array and Field with bounds different from mesh bounds\n ind=[]\n for j in xrange(len(slice_partition)):\n if slice_partition[j].start <= slice_mask[j].start:\n start = None\n else:\n start = slice_partition[j].start - slice_mask[j].start\n if slice_partition[j].stop >= slice_mask[j].stop:\n stop = None\n else:\n stop = slice_partition[j].stop - slice_mask[j].start\n ind.append(slice(start, stop, None))\n return tuple(ind)\n"
] | class View(Field):
def __init__(self, field, partition=None, bounds=None):
""" View prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Integer, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__field = field
if partition is None:
self.__partition = self.__field.partition.copy(bounds=bounds)
else:
self.__partition = partition.copy(bounds=bounds)
self.__bounds = bounds
self.__domain_data = self.__field.data
self.__d = self.__field.data.d # Dict of low level data stores
self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
def __getitem__(self, index):
# Mask
# Only check bounds using start and stop index!
return self.__domain_data[index]
def __setitem__(self, index, data):
# Mask
# Only check bounds!
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__field.rank
@property
def name(self):
return self.__field.name
@property
def unit(self):
return self.__field.unit
@property
def interpolation(self):
return self.__field.interpolation
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__field[self.__mask]
def sync(self):
self.__domain_data.sync()
def __indices(self, slice_partition, slice_mask):
#function to generate Indices for operations with Array and Field with bounds different from mesh bounds
ind=[]
for j in xrange(len(slice_partition)):
if slice_partition[j].start <= slice_mask[j].start:
start = None
else:
start = slice_partition[j].start - slice_mask[j].start
if slice_partition[j].stop >= slice_mask[j].stop:
stop = None
else:
stop = slice_partition[j].stop - slice_mask[j].start
ind.append(slice(start, stop, None))
return tuple(ind)
def __add__(self, x):
return self.__math(operator.add, x)
def __radd__(self, x):
return self.__math(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
return self.__math(operator.sub, x)
def __rsub__(self, x):
return self.__math(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
return self.__math(operator.mul, x)
def __rmul__(self, x):
return self.__math(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
return self
def __div__(self, x):
return self.__math(operator.div, x)
def __rdiv__(self, x):
return self.__math(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__div__(x)
def __itruediv__(self, x):
for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
return self
def __mod__(self, x):
return self.__math(operator.mod, x)
def __rmod__(self, x):
return self.__math(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
return self
def __pow__(self, x):
return self.__math(operator.pow, x)
def __rpow__(self, x):
return self.__math(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
return self
    def __array_op(self, f, x, axis):
        """Apply binary operator ``f`` between this field and a lower-dimensional operand.

        Broadcasts a vector (1D ndarray) or a plane (2D ndarray / 2D Field /
        2D View) across the 3D field — or a vector across a 2D field — on a
        per-domain basis.

        :param f: binary operator function (e.g. ``operator.add``)
        :param x: 1D or 2D numpy.ndarray, or a 2D Field/View
        :param axis: int for a vector (the axis it lies along) or a 2-tuple
            for a plane, e.g. ``(1, 2)`` -> plane lies in the yz-plane
        :return: dict with result of operation (same form as field.d)
        """
        if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
            raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
        d ={}
        #x is a vector (only numpy ndarray)
        if isinstance(axis, int) and isinstance(x, ndarray):
            if len(self.__partition.mesh.bounds[0]) == 3:
                try:
                    for i in self.d:
                        # slice of x matching this domain's extent inside the mask
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:, newaxis])
                        elif axis == 2:
                            d[i] = f(self.d[i], x[ind[2]])
                        else:
                            raise ValueError('"axis" can only have value 0, 1 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
            elif len(self.__partition.mesh.bounds[0]) == 2:
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:])
                        else:
                            # NOTE(review): message says "0 or 2" but the valid
                            # axes for a 2D mesh are 0 and 1.
                            raise ValueError('"axis" can only have value 0 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
        #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
        elif len(axis) == 2:
            #operation for 2D-arrays
            if isinstance(x, ndarray):
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == (0, 1) or axis == (1, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
                        elif axis == (1, 2) or axis == (2, 1):
                            d[i] = f(self.d[i], x[ind[1], ind[2]])
                        elif axis == (0, 2) or axis == (2, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
                        else:
                            raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
                except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
            #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
            elif isinstance(x, Field) or isinstance(x, View):
                if axis == (0, 1) or axis == (1, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
                    except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
                elif axis == (1, 2) or axis == (2, 1):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
                    except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
                elif axis == (0, 2) or axis == (2, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
                    except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
                else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
            else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
        else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
        return d
    # Broadcast-style arithmetic wrappers: each delegates to __array_op with
    # the matching operator and returns a dict of per-domain results.
    def add(self, x, axis):
        """Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.add, x, axis)
    def sub(self, x, axis):
        """Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.sub, x, axis)
    def mul(self, x, axis):
        """Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mul, x, axis)
    def div(self, x, axis):
        """Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.div, x, axis)
    def mod(self, x, axis):
        """Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mod, x, axis)
    def pow(self, x, axis):
        """Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/view.py | View.add | python | def add(self, x, axis):
return self.__array_op(operator.add, x, axis) | Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L288-L294 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
] | class View(Field):
def __init__(self, field, partition=None, bounds=None):
""" View prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Integer, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__field = field
if partition is None:
self.__partition = self.__field.partition.copy(bounds=bounds)
else:
self.__partition = partition.copy(bounds=bounds)
self.__bounds = bounds
self.__domain_data = self.__field.data
self.__d = self.__field.data.d # Dict of low level data stores
self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
    def __getitem__(self, index):
        # Mask / only check bounds using start and stop index!
        # NOTE(review): the bounds/mask check described above is not
        # implemented yet — `index` is forwarded to the shared domain
        # data unchecked.
        return self.__domain_data[index]
    def __setitem__(self, index, data):
        # Mask / only check bounds!
        # NOTE(review): writes likewise go directly to the shared domain
        # data, unchecked.
        self.__domain_data[index] = data
    @property
    def bounds(self):
        """2-tuple of coordinate tuples delimiting the view in the mesh."""
        return self.__bounds
    @property
    def partition(self):
        """Partition restricted to the view's bounds."""
        return self.__partition
    @property
    def rank(self):
        """Rank of the wrapped field."""
        return self.__field.rank
    @property
    def name(self):
        """Name of the wrapped field."""
        return self.__field.name
    @property
    def unit(self):
        """Physical unit of the wrapped field."""
        return self.__field.unit
    @property
    def interpolation(self):
        """Interpolator of the wrapped field."""
        return self.__field.interpolation
    @property
    def d(self):
        """ Primitive data property.
        :return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
        """
        # masked access: the field restricted to this view's index window
        return self.__field[self.__mask]
    def sync(self):
        """Synchronise the underlying domain data (delegates to the wrapped field's data store)."""
        self.__domain_data.sync()
def __indices(self, slice_partition, slice_mask):
#function to generate Indices for operations with Array and Field with bounds different from mesh bounds
ind=[]
for j in xrange(len(slice_partition)):
if slice_partition[j].start <= slice_mask[j].start:
start = None
else:
start = slice_partition[j].start - slice_mask[j].start
if slice_partition[j].stop >= slice_mask[j].stop:
stop = None
else:
stop = slice_partition[j].stop - slice_mask[j].start
ind.append(slice(start, stop, None))
return tuple(ind)
    def __math(self, f, x):
        """Apply binary operator ``f`` between this view and ``x``, per domain.

        :param f: operator.add/sub/mul/... — the operator to apply
        :param x: scalar number, another View (same partitioning/shape),
            a Field (same mesh and bounds), or a numpy.ndarray shaped
            like the view
        :return: dictionary (same keys as ``self.d``) with the per-domain results
        """
        d = {}
        #operation with single number
        if isinstance(x, (int, long, float, complex)):
            for i in self.d: d[i] = f(self.d[i], x)
            return d
        #operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
        elif isinstance(x, View) or isinstance(x, Field):
            try:
                for i in self.d:
                    d[i] = f(self.d[i], x.d[i])
                return d
            # NOTE(review): the bare except also masks errors raised by ``f``
            # itself, not only shape/partition mismatches.
            except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
        #operation with numpy array
        elif isinstance(x, ndarray):
            #array has to be of the same Size as View
            try:
                for i in self.d:
                    # slice of x matching this domain inside the view window
                    ind = self.__indices(self.__partition.meta_data[i], self.__mask)
                    d[i] = f(self.d[i], x[ind])
                return d
            except: raise ValueError('Array has to have same shape as View for operation')
        else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
def __add__(self, x):
return self.__math(operator.add, x)
def __radd__(self, x):
return self.__math(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
return self.__math(operator.sub, x)
def __rsub__(self, x):
return self.__math(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
return self.__math(operator.mul, x)
def __rmul__(self, x):
return self.__math(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
return self
def __div__(self, x):
return self.__math(operator.div, x)
def __rdiv__(self, x):
return self.__math(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__div__(x)
def __itruediv__(self, x):
for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
return self
def __mod__(self, x):
return self.__math(operator.mod, x)
def __rmod__(self, x):
return self.__math(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
return self
def __pow__(self, x):
return self.__math(operator.pow, x)
def __rpow__(self, x):
return self.__math(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
return self
def __array_op(self, f, x, axis):
"""operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
d ={}
#x is a vector (only numpy ndarray)
if isinstance(axis, int) and isinstance(x, ndarray):
if len(self.__partition.mesh.bounds[0]) == 3:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:, newaxis])
elif axis == 2:
d[i] = f(self.d[i], x[ind[2]])
else:
raise ValueError('"axis" can only have value 0, 1 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
elif len(self.__partition.mesh.bounds[0]) == 2:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:])
else:
raise ValueError('"axis" can only have value 0 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
#x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
elif len(axis) == 2:
#operation for 2D-arrays
if isinstance(x, ndarray):
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == (0, 1) or axis == (1, 0):
d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
elif axis == (1, 2) or axis == (2, 1):
d[i] = f(self.d[i], x[ind[1], ind[2]])
elif axis == (0, 2) or axis == (2, 0):
d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
else:
raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
#operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
elif isinstance(x, Field) or isinstance(x, View):
if axis == (0, 1) or axis == (1, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
elif axis == (1, 2) or axis == (2, 1):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
elif axis == (0, 2) or axis == (2, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d
    # Broadcast-style arithmetic wrappers: each delegates to __array_op with
    # the matching operator and returns a dict of per-domain results.
    def sub(self, x, axis):
        """Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.sub, x, axis)
    def mul(self, x, axis):
        """Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mul, x, axis)
    def div(self, x, axis):
        """Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.div, x, axis)
    def mod(self, x, axis):
        """Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mod, x, axis)
    def pow(self, x, axis):
        """Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/view.py | View.sub | python | def sub(self, x, axis):
return self.__array_op(operator.sub, x, axis) | Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L296-L302 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
] | class View(Field):
def __init__(self, field, partition=None, bounds=None):
""" View prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Integer, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__field = field
if partition is None:
self.__partition = self.__field.partition.copy(bounds=bounds)
else:
self.__partition = partition.copy(bounds=bounds)
self.__bounds = bounds
self.__domain_data = self.__field.data
self.__d = self.__field.data.d # Dict of low level data stores
self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
def __getitem__(self, index):
# Mask
# Only check bounds using start and stop index!
return self.__domain_data[index]
def __setitem__(self, index, data):
# Mask
# Only check bounds!
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__field.rank
@property
def name(self):
return self.__field.name
@property
def unit(self):
return self.__field.unit
@property
def interpolation(self):
return self.__field.interpolation
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__field[self.__mask]
def sync(self):
self.__domain_data.sync()
def __indices(self, slice_partition, slice_mask):
#function to generate Indices for operations with Array and Field with bounds different from mesh bounds
ind=[]
for j in xrange(len(slice_partition)):
if slice_partition[j].start <= slice_mask[j].start:
start = None
else:
start = slice_partition[j].start - slice_mask[j].start
if slice_partition[j].stop >= slice_mask[j].stop:
stop = None
else:
stop = slice_partition[j].stop - slice_mask[j].start
ind.append(slice(start, stop, None))
return tuple(ind)
def __math(self, f, x):
"""operator function
:param f: operator.add/sub/mul... used operator
:param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
#operation with single number
if isinstance(x, (int, long, float, complex)):
for i in self.d: d[i] = f(self.d[i], x)
return d
#operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
elif isinstance(x, View) or isinstance(x, Field):
try:
for i in self.d:
d[i] = f(self.d[i], x.d[i])
return d
except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
#operation with numpy array
elif isinstance(x, ndarray):
#array has to be of the same Size as View
try:
for i in self.d:
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as View for operation')
else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
def __add__(self, x):
return self.__math(operator.add, x)
def __radd__(self, x):
return self.__math(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
return self.__math(operator.sub, x)
def __rsub__(self, x):
return self.__math(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
return self.__math(operator.mul, x)
def __rmul__(self, x):
return self.__math(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
return self
def __div__(self, x):
return self.__math(operator.div, x)
def __rdiv__(self, x):
return self.__math(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__div__(x)
def __itruediv__(self, x):
for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
return self
def __mod__(self, x):
return self.__math(operator.mod, x)
def __rmod__(self, x):
return self.__math(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
return self
def __pow__(self, x):
return self.__math(operator.pow, x)
def __rpow__(self, x):
return self.__math(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
return self
def __array_op(self, f, x, axis):
"""operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
d ={}
#x is a vector (only numpy ndarray)
if isinstance(axis, int) and isinstance(x, ndarray):
if len(self.__partition.mesh.bounds[0]) == 3:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:, newaxis])
elif axis == 2:
d[i] = f(self.d[i], x[ind[2]])
else:
raise ValueError('"axis" can only have value 0, 1 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
elif len(self.__partition.mesh.bounds[0]) == 2:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:])
else:
raise ValueError('"axis" can only have value 0 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
#x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
elif len(axis) == 2:
#operation for 2D-arrays
if isinstance(x, ndarray):
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == (0, 1) or axis == (1, 0):
d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
elif axis == (1, 2) or axis == (2, 1):
d[i] = f(self.d[i], x[ind[1], ind[2]])
elif axis == (0, 2) or axis == (2, 0):
d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
else:
raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
#operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
elif isinstance(x, Field) or isinstance(x, View):
if axis == (0, 1) or axis == (1, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
elif axis == (1, 2) or axis == (2, 1):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
elif axis == (0, 2) or axis == (2, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d
def add(self, x, axis):
    """Elementwise addition of this view with a lower-dimensional operand.

    Thin wrapper that forwards ``operator.add`` to ``__array_op``, which
    aligns *x* (a 1D vector or a 2D array/Field/View) with this view along
    the given axis or plane.

    :param x: numpy.ndarray (1D or 2D) or a 2D Field/View.
    :param axis: int selecting the vector axis, or a 2-tuple selecting the plane.
    :return: dict shaped like ``self.d`` holding the per-domain sums.
    """
    combine = operator.add
    return self.__array_op(combine, x, axis)
def mul(self, x, axis):
    """Elementwise multiplication of this view with a lower-dimensional operand.

    Thin wrapper that forwards ``operator.mul`` to ``__array_op``, which
    aligns *x* (a 1D vector or a 2D array/Field/View) with this view along
    the given axis or plane.

    :param x: numpy.ndarray (1D or 2D) or a 2D Field/View.
    :param axis: int selecting the vector axis, or a 2-tuple selecting the plane.
    :return: dict shaped like ``self.d`` holding the per-domain products.
    """
    combine = operator.mul
    return self.__array_op(combine, x, axis)
def div(self, x, axis):
    """Elementwise division of this view by a lower-dimensional operand.

    Thin wrapper that forwards ``operator.div`` (Python 2 division, matching
    the rest of this module) to ``__array_op``, which aligns *x* (a 1D vector
    or a 2D array/Field/View) with this view along the given axis or plane.

    :param x: numpy.ndarray (1D or 2D) or a 2D Field/View.
    :param axis: int selecting the vector axis, or a 2-tuple selecting the plane.
    :return: dict shaped like ``self.d`` holding the per-domain quotients.
    """
    combine = operator.div
    return self.__array_op(combine, x, axis)
def mod(self, x, axis):
    """Elementwise modulo of this view with a lower-dimensional operand.

    Thin wrapper that forwards ``operator.mod`` to ``__array_op``, which
    aligns *x* (a 1D vector or a 2D array/Field/View) with this view along
    the given axis or plane.

    :param x: numpy.ndarray (1D or 2D) or a 2D Field/View.
    :param axis: int selecting the vector axis, or a 2-tuple selecting the plane.
    :return: dict shaped like ``self.d`` holding the per-domain remainders.
    """
    combine = operator.mod
    return self.__array_op(combine, x, axis)
def pow(self, x, axis):
    """Elementwise exponentiation of this view by a lower-dimensional operand.

    Thin wrapper that forwards ``operator.pow`` to ``__array_op``, which
    aligns *x* (a 1D vector or a 2D array/Field/View) with this view along
    the given axis or plane.

    :param x: numpy.ndarray (1D or 2D) or a 2D Field/View.
    :param axis: int selecting the vector axis, or a 2-tuple selecting the plane.
    :return: dict shaped like ``self.d`` holding the per-domain powers.
    """
    combine = operator.pow
    return self.__array_op(combine, x, axis)
|
cstatz/maui | maui/field/view.py | View.mul | python | def mul(self, x, axis):
return self.__array_op(operator.mul, x, axis) | Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L304-L310 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
class View(Field):
    """Windowed view onto a sub-region of an existing Field.

    A View copies the field's partition restricted to ``bounds``, keeps a
    reference to the field's domain data, and exposes only the masked
    sub-region through :attr:`d`.  Arithmetic operators (``+``, ``-``, ...)
    delegate to ``__math``; the named helpers (``add``, ``sub``, ...)
    delegate to ``__array_op`` for axis-aligned vector/plane operands.

    NOTE: this module targets Python 2 (``xrange``, ``long``,
    ``operator.div`` are used).
    """
    def __init__(self, field, partition=None, bounds=None):
        """Create a view of *field* restricted to *bounds*.

        :param field: Field, the underlying field being viewed.
        :param partition: optional Partition; when None, the field's own
            partition is copied with the new bounds.
        :param bounds: 2-tuple of coordinate tuples, (lower, upper) corners
            of the viewed region inside the mesh.
        """
        self.__field = field
        if partition is None:
            self.__partition = self.__field.partition.copy(bounds=bounds)
        else:
            self.__partition = partition.copy(bounds=bounds)
        self.__bounds = bounds
        self.__domain_data = self.__field.data
        self.__d = self.__field.data.d # Dict of low level data stores
        # Snap the requested coordinate bounds to the nearest mesh nodes ...
        self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
        self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
        # ... and make the stop index exclusive (+1 on every axis).
        tmp = list(self.__stop_index)
        for i in range(len(tmp)):
            tmp[i] += 1
        self.__stop_index = tuple(tmp)
        # Slice mask selecting [start, stop) along each axis of the field.
        self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
    def __getitem__(self, index):
        # Mask
        # Only check bounds using start and stop index!
        # NOTE(review): reads pass straight through to the underlying field
        # data; the comments above suggest bounds checking was planned but it
        # is not implemented here -- confirm before relying on it.
        return self.__domain_data[index]
    def __setitem__(self, index, data):
        # Mask
        # Only check bounds!
        # NOTE(review): writes also go straight to the underlying field data
        # without bounds enforcement.
        self.__domain_data[index] = data
    @property
    def bounds(self):
        # (lower, upper) coordinate bounds this view was created with.
        return self.__bounds
    @property
    def partition(self):
        # Partition copy restricted to the view's bounds.
        return self.__partition
    @property
    def rank(self):
        # Delegated to the underlying field.
        return self.__field.rank
    @property
    def name(self):
        # Delegated to the underlying field.
        return self.__field.name
    @property
    def unit(self):
        # Delegated to the underlying field.
        return self.__field.unit
    @property
    def interpolation(self):
        # Delegated to the underlying field.
        return self.__field.interpolation
    @property
    def d(self):
        """ Primitive data property.
        :return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
        """
        # Indexing the field with the view's mask yields only the sub-region.
        return self.__field[self.__mask]
    def sync(self):
        # Synchronise the underlying domain data store.
        self.__domain_data.sync()
    def __indices(self, slice_partition, slice_mask):
        # Translate a domain's partition slices into slices relative to the
        # view's mask window: an edge that lies inside the window becomes
        # None (take everything on that side), otherwise it is rebased onto
        # the window start.
        ind=[]
        for j in xrange(len(slice_partition)):  # xrange: Python 2 builtin
            if slice_partition[j].start <= slice_mask[j].start:
                start = None
            else:
                start = slice_partition[j].start - slice_mask[j].start
            if slice_partition[j].stop >= slice_mask[j].stop:
                stop = None
            else:
                stop = slice_partition[j].stop - slice_mask[j].start
            ind.append(slice(start, stop, None))
        return tuple(ind)
    def __math(self, f, x):
        """operator function
        :param f: operator.add/sub/mul... used operator
        :param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
        :return: dictionary (same shape as field.d) with result of operation
        """
        d = {}
        #operation with single number
        if isinstance(x, (int, long, float, complex)):  # long: Python 2 builtin
            for i in self.d: d[i] = f(self.d[i], x)
            return d
        #operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
        elif isinstance(x, View) or isinstance(x, Field):
            try:
                for i in self.d:
                    d[i] = f(self.d[i], x.d[i])
                return d
            # broad bare except: any key/shape mismatch is re-raised as ValueError
            except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
        #operation with numpy array
        elif isinstance(x, ndarray):
            #array has to be of the same Size as View
            try:
                for i in self.d:
                    ind = self.__indices(self.__partition.meta_data[i], self.__mask)
                    d[i] = f(self.d[i], x[ind])
                return d
            except: raise ValueError('Array has to have same shape as View for operation')
        else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
    def __add__(self, x):
        return self.__math(operator.add, x)
    def __radd__(self, x):
        return self.__math(operator.add, x)
    def __iadd__(self, x):
        # In-place variants write back through the [:] slice so existing
        # references to the underlying arrays stay valid.
        for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
        return self
    def __sub__(self, x):
        return self.__math(operator.sub, x)
    # NOTE(review): the reflected variants below (__rsub__, __rdiv__,
    # __rmod__, __rpow__, ...) reuse __math, which always computes
    # f(self_data, x); for non-commutative operators this yields
    # ``self OP x`` rather than ``x OP self`` -- confirm the operand order
    # is intended (see Python data model docs on reflected methods).
    def __rsub__(self, x):
        return self.__math(operator.sub, x)
    def __isub__(self, x):
        for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
        return self
    def __mul__(self, x):
        return self.__math(operator.mul, x)
    def __rmul__(self, x):
        return self.__math(operator.mul, x)
    def __imul__(self, x):
        for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
        return self
    def __div__(self, x):
        return self.__math(operator.div, x)
    def __rdiv__(self, x):
        return self.__math(operator.div, x)
    def __idiv__(self, x):
        for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
        return self
    def __truediv__(self, x):
        return self.__div__(x)
    def __rtruediv__(self, x):
        return self.__div__(x)
    def __itruediv__(self, x):
        for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
        return self
    def __mod__(self, x):
        return self.__math(operator.mod, x)
    def __rmod__(self, x):
        return self.__math(operator.mod, x)
    def __imod__(self, x):
        for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
        return self
    def __pow__(self, x):
        return self.__math(operator.pow, x)
    def __rpow__(self, x):
        return self.__math(operator.pow, x)
    def __ipow__(self, x):
        for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
        return self
    def __array_op(self, f, x, axis):
        """operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
        :param f: operator function
        :param x: array(1D, 2D) or field (2D) or View (2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as field.d)
        """
        if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
            raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
        d ={}
        #x is a vector (only numpy ndarray)
        if isinstance(axis, int) and isinstance(x, ndarray):
            # 3-D mesh: broadcast the vector across the two remaining axes
            # via numpy.newaxis.
            if len(self.__partition.mesh.bounds[0]) == 3:
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:, newaxis])
                        elif axis == 2:
                            d[i] = f(self.d[i], x[ind[2]])
                        else:
                            raise ValueError('"axis" can only have value 0, 1 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
            elif len(self.__partition.mesh.bounds[0]) == 2:
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:])
                        else:
                            # NOTE(review): message text says "0 or 2" but the
                            # values accepted by this 2-D branch are 0 and 1.
                            raise ValueError('"axis" can only have value 0 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
        #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
        elif len(axis) == 2:
            #operation for 2D-arrays
            if isinstance(x, ndarray):
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == (0, 1) or axis == (1, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
                        elif axis == (1, 2) or axis == (2, 1):
                            d[i] = f(self.d[i], x[ind[1], ind[2]])
                        elif axis == (0, 2) or axis == (2, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
                        else:
                            raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
                except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
            #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
            elif isinstance(x, Field) or isinstance(x, View):
                if axis == (0, 1) or axis == (1, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
                    except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
                elif axis == (1, 2) or axis == (2, 1):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
                    except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
                elif axis == (0, 2) or axis == (2, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
                    except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
                else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
            else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
        else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
        return d
    def add(self, x, axis):
        """Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.add, x, axis)
    def sub(self, x, axis):
        """Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.sub, x, axis)
    def div(self, x, axis):
        """Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.div, x, axis)
    def mod(self, x, axis):
        """Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mod, x, axis)
    def pow(self, x, axis):
        """Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/view.py | View.div | python | def div(self, x, axis):
return self.__array_op(operator.div, x, axis) | Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L312-L318 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
class View(Field):
    """Windowed view onto a sub-region of an existing Field.

    A View copies the field's partition restricted to ``bounds``, keeps a
    reference to the field's domain data, and exposes only the masked
    sub-region through :attr:`d`.  Arithmetic operators (``+``, ``-``, ...)
    delegate to ``__math``; the named helpers (``add``, ``sub``, ...)
    delegate to ``__array_op`` for axis-aligned vector/plane operands.

    NOTE: this module targets Python 2 (``xrange``, ``long``,
    ``operator.div`` are used).
    """
    def __init__(self, field, partition=None, bounds=None):
        """Create a view of *field* restricted to *bounds*.

        :param field: Field, the underlying field being viewed.
        :param partition: optional Partition; when None, the field's own
            partition is copied with the new bounds.
        :param bounds: 2-tuple of coordinate tuples, (lower, upper) corners
            of the viewed region inside the mesh.
        """
        self.__field = field
        if partition is None:
            self.__partition = self.__field.partition.copy(bounds=bounds)
        else:
            self.__partition = partition.copy(bounds=bounds)
        self.__bounds = bounds
        self.__domain_data = self.__field.data
        self.__d = self.__field.data.d # Dict of low level data stores
        # Snap the requested coordinate bounds to the nearest mesh nodes ...
        self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
        self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
        # ... and make the stop index exclusive (+1 on every axis).
        tmp = list(self.__stop_index)
        for i in range(len(tmp)):
            tmp[i] += 1
        self.__stop_index = tuple(tmp)
        # Slice mask selecting [start, stop) along each axis of the field.
        self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
    def __getitem__(self, index):
        # Mask
        # Only check bounds using start and stop index!
        # NOTE(review): reads pass straight through to the underlying field
        # data; the comments above suggest bounds checking was planned but it
        # is not implemented here -- confirm before relying on it.
        return self.__domain_data[index]
    def __setitem__(self, index, data):
        # Mask
        # Only check bounds!
        # NOTE(review): writes also go straight to the underlying field data
        # without bounds enforcement.
        self.__domain_data[index] = data
    @property
    def bounds(self):
        # (lower, upper) coordinate bounds this view was created with.
        return self.__bounds
    @property
    def partition(self):
        # Partition copy restricted to the view's bounds.
        return self.__partition
    @property
    def rank(self):
        # Delegated to the underlying field.
        return self.__field.rank
    @property
    def name(self):
        # Delegated to the underlying field.
        return self.__field.name
    @property
    def unit(self):
        # Delegated to the underlying field.
        return self.__field.unit
    @property
    def interpolation(self):
        # Delegated to the underlying field.
        return self.__field.interpolation
    @property
    def d(self):
        """ Primitive data property.
        :return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
        """
        # Indexing the field with the view's mask yields only the sub-region.
        return self.__field[self.__mask]
    def sync(self):
        # Synchronise the underlying domain data store.
        self.__domain_data.sync()
    def __indices(self, slice_partition, slice_mask):
        # Translate a domain's partition slices into slices relative to the
        # view's mask window: an edge that lies inside the window becomes
        # None (take everything on that side), otherwise it is rebased onto
        # the window start.
        ind=[]
        for j in xrange(len(slice_partition)):  # xrange: Python 2 builtin
            if slice_partition[j].start <= slice_mask[j].start:
                start = None
            else:
                start = slice_partition[j].start - slice_mask[j].start
            if slice_partition[j].stop >= slice_mask[j].stop:
                stop = None
            else:
                stop = slice_partition[j].stop - slice_mask[j].start
            ind.append(slice(start, stop, None))
        return tuple(ind)
    def __math(self, f, x):
        """operator function
        :param f: operator.add/sub/mul... used operator
        :param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
        :return: dictionary (same shape as field.d) with result of operation
        """
        d = {}
        #operation with single number
        if isinstance(x, (int, long, float, complex)):  # long: Python 2 builtin
            for i in self.d: d[i] = f(self.d[i], x)
            return d
        #operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
        elif isinstance(x, View) or isinstance(x, Field):
            try:
                for i in self.d:
                    d[i] = f(self.d[i], x.d[i])
                return d
            # broad bare except: any key/shape mismatch is re-raised as ValueError
            except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
        #operation with numpy array
        elif isinstance(x, ndarray):
            #array has to be of the same Size as View
            try:
                for i in self.d:
                    ind = self.__indices(self.__partition.meta_data[i], self.__mask)
                    d[i] = f(self.d[i], x[ind])
                return d
            except: raise ValueError('Array has to have same shape as View for operation')
        else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
    def __add__(self, x):
        return self.__math(operator.add, x)
    def __radd__(self, x):
        return self.__math(operator.add, x)
    def __iadd__(self, x):
        # In-place variants write back through the [:] slice so existing
        # references to the underlying arrays stay valid.
        for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
        return self
    def __sub__(self, x):
        return self.__math(operator.sub, x)
    # NOTE(review): the reflected variants below (__rsub__, __rdiv__,
    # __rmod__, __rpow__, ...) reuse __math, which always computes
    # f(self_data, x); for non-commutative operators this yields
    # ``self OP x`` rather than ``x OP self`` -- confirm the operand order
    # is intended (see Python data model docs on reflected methods).
    def __rsub__(self, x):
        return self.__math(operator.sub, x)
    def __isub__(self, x):
        for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
        return self
    def __mul__(self, x):
        return self.__math(operator.mul, x)
    def __rmul__(self, x):
        return self.__math(operator.mul, x)
    def __imul__(self, x):
        for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
        return self
    def __div__(self, x):
        return self.__math(operator.div, x)
    def __rdiv__(self, x):
        return self.__math(operator.div, x)
    def __idiv__(self, x):
        for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
        return self
    def __truediv__(self, x):
        return self.__div__(x)
    def __rtruediv__(self, x):
        return self.__div__(x)
    def __itruediv__(self, x):
        for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
        return self
    def __mod__(self, x):
        return self.__math(operator.mod, x)
    def __rmod__(self, x):
        return self.__math(operator.mod, x)
    def __imod__(self, x):
        for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
        return self
    def __pow__(self, x):
        return self.__math(operator.pow, x)
    def __rpow__(self, x):
        return self.__math(operator.pow, x)
    def __ipow__(self, x):
        for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
        return self
    def __array_op(self, f, x, axis):
        """operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
        :param f: operator function
        :param x: array(1D, 2D) or field (2D) or View (2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as field.d)
        """
        if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
            raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
        d ={}
        #x is a vector (only numpy ndarray)
        if isinstance(axis, int) and isinstance(x, ndarray):
            # 3-D mesh: broadcast the vector across the two remaining axes
            # via numpy.newaxis.
            if len(self.__partition.mesh.bounds[0]) == 3:
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:, newaxis])
                        elif axis == 2:
                            d[i] = f(self.d[i], x[ind[2]])
                        else:
                            raise ValueError('"axis" can only have value 0, 1 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
            elif len(self.__partition.mesh.bounds[0]) == 2:
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.d[i], x[ind[0]][:, newaxis])
                        elif axis == 1:
                            d[i] = f(self.d[i], x[ind[1]][:])
                        else:
                            # NOTE(review): message text says "0 or 2" but the
                            # values accepted by this 2-D branch are 0 and 1.
                            raise ValueError('"axis" can only have value 0 or 2 .')
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
        #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
        elif len(axis) == 2:
            #operation for 2D-arrays
            if isinstance(x, ndarray):
                try:
                    for i in self.d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == (0, 1) or axis == (1, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
                        elif axis == (1, 2) or axis == (2, 1):
                            d[i] = f(self.d[i], x[ind[1], ind[2]])
                        elif axis == (0, 2) or axis == (2, 0):
                            d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
                        else:
                            raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
                except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
            #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
            elif isinstance(x, Field) or isinstance(x, View):
                if axis == (0, 1) or axis == (1, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
                    except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
                elif axis == (1, 2) or axis == (2, 1):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
                    except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
                elif axis == (0, 2) or axis == (2, 0):
                    try:
                        for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
                    except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
                else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
            else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
        else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
        return d
    def add(self, x, axis):
        """Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.add, x, axis)
    def sub(self, x, axis):
        """Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.sub, x, axis)
    def mul(self, x, axis):
        """Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mul, x, axis)
    def mod(self, x, axis):
        """Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.mod, x, axis)
    def pow(self, x, axis):
        """Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
        :param x: array(1D, 2D) or field (2D) or View(2D)
        :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
        :return: dict with result of operation (same form as view.d)
        """
        return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/view.py | View.mod | python | def mod(self, x, axis):
return self.__array_op(operator.mod, x, axis) | Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L320-L326 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
] | class View(Field):
    def __init__(self, field, partition=None, bounds=None):
        """Create a rectangular sub-view onto an existing Field.

        :param field: Field, the underlying field whose data this view exposes.
        :param partition: Partition, optional; copied (restricted to bounds)
            for this view. If None, the field's own partition is copied.
        :param bounds: 2-tuple of coordinate tuples, (lower, upper) corner of
            the view inside the field's mesh. Effectively required: both
            corners are indexed directly to locate the nearest mesh nodes.
        """
        self.__field = field
        if partition is None:
            self.__partition = self.__field.partition.copy(bounds=bounds)
        else:
            self.__partition = partition.copy(bounds=bounds)
        self.__bounds = bounds
        self.__domain_data = self.__field.data
        self.__d = self.__field.data.d  # Dict of low level data stores
        # Index of the mesh node nearest to each corner of the bounds.
        self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
        self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
        # Advance each stop component by one so it can serve as an exclusive
        # slice bound when the mask is built below.
        tmp = list(self.__stop_index)
        for i in range(len(tmp)):
            tmp[i] += 1
        self.__stop_index = tuple(tmp)
        self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
def __getitem__(self, index):
# Mask
# Only check bounds using start and stop index!
return self.__domain_data[index]
def __setitem__(self, index, data):
# Mask
# Only check bounds!
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__field.rank
@property
def name(self):
return self.__field.name
@property
def unit(self):
return self.__field.unit
@property
def interpolation(self):
return self.__field.interpolation
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__field[self.__mask]
def sync(self):
self.__domain_data.sync()
    def __indices(self, slice_partition, slice_mask):
        # Translate a domain's global slice range (slice_partition) into
        # indices relative to the view's mask (slice_mask), per dimension.
        # Used when combining the view with a plain numpy array whose bounds
        # differ from the mesh bounds.
        ind=[]
        for j in xrange(len(slice_partition)):
            # Domain begins at or before the mask: overlap starts at 0 (None).
            if slice_partition[j].start <= slice_mask[j].start:
                start = None
            else:
                start = slice_partition[j].start - slice_mask[j].start
            # Domain ends at or after the mask: overlap runs to the end (None).
            if slice_partition[j].stop >= slice_mask[j].stop:
                stop = None
            else:
                stop = slice_partition[j].stop - slice_mask[j].start
            ind.append(slice(start, stop, None))
        return tuple(ind)
def __math(self, f, x):
"""operator function
:param f: operator.add/sub/mul... used operator
:param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
#operation with single number
if isinstance(x, (int, long, float, complex)):
for i in self.d: d[i] = f(self.d[i], x)
return d
#operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
elif isinstance(x, View) or isinstance(x, Field):
try:
for i in self.d:
d[i] = f(self.d[i], x.d[i])
return d
except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
#operation with numpy array
elif isinstance(x, ndarray):
#array has to be of the same Size as View
try:
for i in self.d:
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as View for operation')
else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
def __add__(self, x):
return self.__math(operator.add, x)
def __radd__(self, x):
return self.__math(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
return self.__math(operator.sub, x)
def __rsub__(self, x):
return self.__math(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
return self.__math(operator.mul, x)
def __rmul__(self, x):
return self.__math(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
return self
def __div__(self, x):
return self.__math(operator.div, x)
def __rdiv__(self, x):
return self.__math(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__div__(x)
def __itruediv__(self, x):
for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
return self
def __mod__(self, x):
return self.__math(operator.mod, x)
def __rmod__(self, x):
return self.__math(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
return self
def __pow__(self, x):
return self.__math(operator.pow, x)
def __rpow__(self, x):
return self.__math(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
return self
def __array_op(self, f, x, axis):
"""operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
d ={}
#x is a vector (only numpy ndarray)
if isinstance(axis, int) and isinstance(x, ndarray):
if len(self.__partition.mesh.bounds[0]) == 3:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:, newaxis])
elif axis == 2:
d[i] = f(self.d[i], x[ind[2]])
else:
raise ValueError('"axis" can only have value 0, 1 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
elif len(self.__partition.mesh.bounds[0]) == 2:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:])
else:
raise ValueError('"axis" can only have value 0 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
#x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
elif len(axis) == 2:
#operation for 2D-arrays
if isinstance(x, ndarray):
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == (0, 1) or axis == (1, 0):
d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
elif axis == (1, 2) or axis == (2, 1):
d[i] = f(self.d[i], x[ind[1], ind[2]])
elif axis == (0, 2) or axis == (2, 0):
d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
else:
raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
#operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
elif isinstance(x, Field) or isinstance(x, View):
if axis == (0, 1) or axis == (1, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
elif axis == (1, 2) or axis == (2, 1):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
elif axis == (0, 2) or axis == (2, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d
def add(self, x, axis):
"""Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.add, x, axis)
def sub(self, x, axis):
"""Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.sub, x, axis)
def mul(self, x, axis):
"""Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.mul, x, axis)
def div(self, x, axis):
"""Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.div, x, axis)
def pow(self, x, axis):
"""Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/view.py | View.pow | python | def pow(self, x, axis):
return self.__array_op(operator.pow, x, axis) | Function to power 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/view.py#L328-L334 | [
"def __array_op(self, f, x, axis):\n \"\"\"operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)\n :param f: operator function\n :param x: array(1D, 2D) or field (2D) or View (2D)\n :param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis\n :return: dict with result of operation (same form as field.d)\n \"\"\"\n if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:\n raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')\n\n d ={}\n #x is a vector (only numpy ndarray)\n if isinstance(axis, int) and isinstance(x, ndarray):\n if len(self.__partition.mesh.bounds[0]) == 3:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:, newaxis])\n elif axis == 2:\n d[i] = f(self.d[i], x[ind[2]])\n else:\n raise ValueError('\"axis\" can only have value 0, 1 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)\n elif len(self.__partition.mesh.bounds[0]) == 2:\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == 0:\n d[i] = f(self.d[i], x[ind[0]][:, newaxis])\n elif axis == 1:\n d[i] = f(self.d[i], x[ind[1]][:])\n else:\n raise ValueError('\"axis\" can only have value 0 or 2 .')\n except: raise ValueError('Vector does not have same length as Field along axis %d.' 
%axis)\n #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)\n elif len(axis) == 2:\n #operation for 2D-arrays\n if isinstance(x, ndarray):\n try:\n for i in self.d:\n ind = self.__indices(self.partition.meta_data[i], self.__mask)\n if axis == (0, 1) or axis == (1, 0):\n d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])\n elif axis == (1, 2) or axis == (2, 1):\n d[i] = f(self.d[i], x[ind[1], ind[2]])\n elif axis == (0, 2) or axis == (2, 0):\n d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])\n else:\n raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))\n #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)\n elif isinstance(x, Field) or isinstance(x, View):\n if axis == (0, 1) or axis == (1, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])\n except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')\n elif axis == (1, 2) or axis == (2, 1):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])\n except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')\n elif axis == (0, 2) or axis == (2, 0):\n try:\n for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])\n except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')\n else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')\n else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')\n\n else: raise ValueError('Argument \"axis\" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')\n\n return d\n"
] | class View(Field):
def __init__(self, field, partition=None, bounds=None):
""" View prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Integer, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__field = field
if partition is None:
self.__partition = self.__field.partition.copy(bounds=bounds)
else:
self.__partition = partition.copy(bounds=bounds)
self.__bounds = bounds
self.__domain_data = self.__field.data
self.__d = self.__field.data.d # Dict of low level data stores
self.__start_index = self.__partition.mesh.nearest_node(bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
def __getitem__(self, index):
# Mask
# Only check bounds using start and stop index!
return self.__domain_data[index]
def __setitem__(self, index, data):
# Mask
# Only check bounds!
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__field.rank
@property
def name(self):
return self.__field.name
@property
def unit(self):
return self.__field.unit
@property
def interpolation(self):
return self.__field.interpolation
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__field[self.__mask]
def sync(self):
self.__domain_data.sync()
    def __indices(self, slice_partition, slice_mask):
        # Translate a domain's global slice range (slice_partition) into
        # indices relative to the view's mask (slice_mask), per dimension.
        # Used when combining the view with a plain numpy array whose bounds
        # differ from the mesh bounds.
        ind=[]
        for j in xrange(len(slice_partition)):
            # Domain begins at or before the mask: overlap starts at 0 (None).
            if slice_partition[j].start <= slice_mask[j].start:
                start = None
            else:
                start = slice_partition[j].start - slice_mask[j].start
            # Domain ends at or after the mask: overlap runs to the end (None).
            if slice_partition[j].stop >= slice_mask[j].stop:
                stop = None
            else:
                stop = slice_partition[j].stop - slice_mask[j].start
            ind.append(slice(start, stop, None))
        return tuple(ind)
def __math(self, f, x):
"""operator function
:param f: operator.add/sub/mul... used operator
:param x: other object view should be add/sub... with (other View, number, numpy.ndarray, Field)
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
#operation with single number
if isinstance(x, (int, long, float, complex)):
for i in self.d: d[i] = f(self.d[i], x)
return d
#operation with other view (must have same size and partitions as self) or Field from same mesh and same bounds like View
elif isinstance(x, View) or isinstance(x, Field):
try:
for i in self.d:
d[i] = f(self.d[i], x.d[i])
return d
except: raise ValueError('Views have to be partitioned and shaped in the same way to be add/sub/mul/div/pow/mod\nField has to have same bounds and origin mesh as View.')
#operation with numpy array
elif isinstance(x, ndarray):
#array has to be of the same Size as View
try:
for i in self.d:
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as View for operation')
else: raise ValueError('Operators only available for View and (View, Field, numpy.ndarray with same shape View, integer, float, complex).')
def __add__(self, x):
return self.__math(operator.add, x)
def __radd__(self, x):
return self.__math(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
return self.__math(operator.sub, x)
def __rsub__(self, x):
return self.__math(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
return self.__math(operator.mul, x)
def __rmul__(self, x):
return self.__math(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.d[i][:] = self.__mul__(x)[i][:]
return self
def __div__(self, x):
return self.__math(operator.div, x)
def __rdiv__(self, x):
return self.__math(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__div__(x)
def __itruediv__(self, x):
for i in self.d: self.d[i][:] = self.__truediv__(x)[i][:]
return self
def __mod__(self, x):
return self.__math(operator.mod, x)
def __rmod__(self, x):
return self.__math(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.d[i][:] = self.__mod__(x)[i][:]
return self
def __pow__(self, x):
return self.__math(operator.pow, x)
def __rpow__(self, x):
return self.__math(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.d[i][:] = self.__pow__(x)[i][:]
return self
def __array_op(self, f, x, axis):
"""operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
d ={}
#x is a vector (only numpy ndarray)
if isinstance(axis, int) and isinstance(x, ndarray):
if len(self.__partition.mesh.bounds[0]) == 3:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:, newaxis])
elif axis == 2:
d[i] = f(self.d[i], x[ind[2]])
else:
raise ValueError('"axis" can only have value 0, 1 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
elif len(self.__partition.mesh.bounds[0]) == 2:
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.d[i], x[ind[0]][:, newaxis])
elif axis == 1:
d[i] = f(self.d[i], x[ind[1]][:])
else:
raise ValueError('"axis" can only have value 0 or 2 .')
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
#x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
elif len(axis) == 2:
#operation for 2D-arrays
if isinstance(x, ndarray):
try:
for i in self.d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == (0, 1) or axis == (1, 0):
d[i] = f(self.d[i], x[ind[0], ind[1]][:, :, newaxis])
elif axis == (1, 2) or axis == (2, 1):
d[i] = f(self.d[i], x[ind[1], ind[2]])
elif axis == (0, 2) or axis == (2, 0):
d[i] = f(self.d[i], x[ind[0], ind[2]][:, newaxis, :])
else:
raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
#operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
elif isinstance(x, Field) or isinstance(x, View):
if axis == (0, 1) or axis == (1, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[1])][:, :, newaxis])
except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
elif axis == (1, 2) or axis == (2, 1):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[1],i[2])])
except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
elif axis == (0, 2) or axis == (2, 0):
try:
for i in self.d: d[i] = f(self.d[i], x.d[(i[0],i[2])][:, newaxis, :])
except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d
def add(self, x, axis):
"""Function to add 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.add, x, axis)
def sub(self, x, axis):
"""Function to sub 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.sub, x, axis)
def mul(self, x, axis):
"""Function to multiply 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.mul, x, axis)
def div(self, x, axis):
"""Function to divide 3D View by vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.div, x, axis)
def mod(self, x, axis):
"""Function to modulo 3D View with vector or 2D array (type = numpy.ndarray or 2D Field or 2D View) or 2D View with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as view.d)
"""
return self.__array_op(operator.mod, x, axis)
|
def calc_local_indices(shape, num_partitions, coordinate):
    """Calculate the local index range of one partition of a global array.

    The global extent along each dimension is split into ``num_partitions``
    nearly-equal parts; the first ``extent % parts`` partitions receive one
    extra element each. The result describes the slice of the global data
    owned by the process at the given cartesian ``coordinate``.

    :param shape: global shape of the data (one extent per dimension).
    :param num_partitions: number of partitions per dimension
        (e.g. from MPI.Compute_dims()).
    :param coordinate: cartesian coordinate of this partition
        (e.g. from CARTESIAN_COMMUNICATOR.Get_coords(rank)).
    :return: (start_index, stop_index, local_shape) — three tuples, one entry
        per dimension; stop_index is exclusive.
    :raises ValueError: if shape and num_partitions differ in length.
    """
    dimension = len(shape)
    # Explicit check instead of assert: assertions are stripped under -O.
    if dimension != len(num_partitions):
        raise ValueError("shape and num_partitions must have the same length")

    start_index = []
    stop_index = []
    local_shape = []

    for dim in range(dimension):
        base, remainder = divmod(shape[dim], num_partitions[dim])
        coord = coordinate[dim]
        # The first `remainder` partitions each get one extra element.
        extent = base + (1 if coord < remainder else 0)
        # Partitions before `coord` contribute `base` elements each, plus one
        # extra for every earlier partition that absorbed part of the remainder.
        start = coord * base + min(coord, remainder)
        start_index.append(start)
        stop_index.append(start + extent)
        local_shape.append(extent)

    return tuple(start_index), tuple(stop_index), tuple(local_shape)
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
__author__ = 'christoph.statz <at> tu-dresden.de'
import numpy as np
def compute_receiver_halo_location(mesh, halos, idx, sender):
    """Return the from/to axis coordinates of the halo region that the
    partition at *idx* receives from the partition at *sender*.

    :param mesh: local mesh providing the per-dimension ``axes`` arrays
    :param halos: 2-tuple (lower widths, upper widths) of halo cells
    :param idx: cartesian coordinate of the receiving partition
    :param sender: cartesian coordinate of the sending partition
    :return: (lower, upper) lists of axis coordinates, one entry per dimension
    """
    direction = np.asarray(idx) - np.asarray(sender)
    # A negative step in any dimension means the sender sits "above" us.
    sender_is_high = any(step < 0 for step in direction)
    lower = []
    upper = []
    for dim, step in enumerate(direction):
        axis = mesh.axes[dim]
        if step == 0:
            # Dimension not involved in the exchange: full axis extent.
            lower.append(axis[0])
            upper.append(axis[-1])
        elif sender_is_high:
            # Receive into the upper halo strip of this dimension.
            lower.append(axis[-1 * halos[1][dim]])
            upper.append(axis[-1])
        else:
            # Receive into the lower halo strip of this dimension.
            lower.append(axis[0])
            upper.append(axis[halos[0][dim] - 1])
    return lower, upper
def compute_sender(idx, halos):
    """Enumerate neighbouring partition coordinates that send halo data to *idx*.

    Starting at the cartesian coordinate *idx*, a step of -1 is taken in every
    dimension with a non-zero lower halo and +1 in every dimension with a
    non-zero upper halo; the steps are combined recursively across dimensions.
    Coordinates with a negative component are discarded.  Results are grouped
    by recursion level (level k holds coordinates reached after k+1 steps).

    :param idx: cartesian coordinate tuple of the receiving partition
    :param halos: 2-tuple (lower widths, upper widths) per dimension
    :return: dict mapping recursion level -> list of unique sender coordinates
    """
    # Recursively apply the remaining step directions; each taken step is
    # zeroed out so every dimension moves at most once per branch.
    def iterate_sender(position, direction, sender, level=0):
        for i, el in enumerate(direction):
            new_position = list(position)
            new_position[i] = position[i] + el
            # Record only real moves that stay non-negative.
            # NOTE(review): no upper-bound check here -- presumably
            # out-of-range high coordinates are filtered by the caller; confirm.
            if new_position[i] != position[i] and new_position[i] >= 0:
                sender[level].append(tuple(new_position))
                new_direction = list(direction)
                new_direction[i] = 0
                iterate_sender(tuple(new_position), tuple(new_direction), sender, level=level+1)
    sender = {}
    for i in range(len(idx)):
        sender[i] = []
    # Steps towards lower neighbours: -1 wherever a lower halo exists.
    halo = tuple(-1*(np.array(halos[0]) > 0).astype('int'))
    iterate_sender(idx, halo, sender)
    # Steps towards upper neighbours: +1 wherever an upper halo exists.
    halo = tuple((np.array(halos[1]) > 0).astype('int'))
    iterate_sender(idx, halo, sender)
    # Different recursion orders can reach the same coordinate; deduplicate.
    for el in sender.keys():
        sender[el] = list(set(sender[el]))
    return sender
def create_mask_from_indices(indices, halos=None):
    """Build a tuple of slices selecting the region given by start/stop index
    tuples, optionally widened by halo cells on both sides.

    :param indices: ((start, ...), (stop, ...)) per-dimension index tuples
    :param halos: optional ((low, ...), (high, ...)) halo widths; defaults to
                  zero-width halos in every dimension
    :return: tuple of ``slice`` objects, one per dimension
    """
    starts, stops = indices[0], indices[1]
    if halos is None:
        zeros = tuple(0 for _ in starts)
        halos = (zeros, zeros)
    mask = []
    for dim, start in enumerate(starts):
        stop = stops[dim]
        # A degenerate range (start == stop) still selects a single cell.
        pad = 1 if start == stop else 0
        mask.append(slice(start - halos[0][dim], stop + halos[1][dim] + pad))
    return tuple(mask)
def calculate_adjacency(partitions, coordinate, stencil):
    """Return the (low, high) halo widths for the partition at *coordinate*.

    A boundary partition gets a zero-width halo on its boundary side; interior
    sides get the corresponding stencil width.

    Bug fix: the previous version compared integers with ``is``/``is not``
    (``p is 0``, ``p is partitions[i]-1``).  Identity comparison of ints is a
    CPython small-int-cache artefact and silently fails for values outside
    the cache (e.g. partition counts > 256), yielding wrong adjacency.

    :param partitions: number of partitions per dimension
    :param coordinate: cartesian coordinate of this partition
    :param stencil: ((low widths, ...), (high widths, ...)) halo stencil
    :return: (low, high) tuples of halo widths per dimension
    :raises ValueError: on dimension mismatch or an out-of-range coordinate
    """
    if len(partitions) != len(coordinate):
        raise ValueError
    for i, coord in enumerate(coordinate):
        if coord > partitions[i]-1:
            raise ValueError("Coordinate %i in dimension %i should not exist!" % (coord, i))
    low = []
    high = []
    for i, p in enumerate(coordinate):
        if p == 0:
            low.append(0)  # domain boundary: no lower neighbour
        else:
            low.append(stencil[0][i])
        if p == partitions[i]-1:
            high.append(0)  # domain boundary: no upper neighbour
        else:
            high.append(stencil[1][i])
    return tuple(low), tuple(high)
def do_create_domain(mesh, mask, bounds):
    """Decide whether a (sub-)domain overlapping *bounds* should be created.

    With ``bounds is None`` every domain is created.  Otherwise the global
    mesh bounds are validated against *bounds* (raising ``ValueError`` when
    they do not overlap at all), and, when a *mask* is given, the masked
    sub-mesh is checked the same way; a non-overlapping sub-mesh yields
    ``False`` instead of raising.

    :param mesh: mesh providing ``bounds`` and slice-based ``__getitem__``
    :param mask: optional tuple of slices selecting the candidate sub-mesh
    :param bounds: optional ((low, ...), (high, ...)) coordinate bounds
    :return: True if the domain should be created, False otherwise
    :raises ValueError: when *bounds* lies outside the global mesh bounds
    """
    if bounds is None:
        return True
    for dim, low in enumerate(mesh.bounds[0]):
        if low > bounds[1][dim]:
            raise ValueError('Higher bounds out of global mesh bounds.')
    for dim, high in enumerate(mesh.bounds[1]):
        if high < bounds[0][dim]:
            raise ValueError('Lower bounds out of global mesh bounds.')
    if mask is None:
        return True
    # Re-run the bounds check on the masked sub-mesh; failure there only
    # means "do not create this particular domain".
    try:
        do_create_domain(mesh[mask], mask=None, bounds=bounds)
    except ValueError:
        return False
    return True
def modify_halos_and_indices(mesh, indices, halos, bounds):
    """Clip a domain's halos and index range against user-supplied field bounds.

    Where the requested *bounds* fall on or inside the region currently
    selected by *indices*, the corresponding halo width is zeroed (the domain
    has no neighbour on that side) and the start/stop index is moved to the
    mesh node nearest to the bound.

    :param mesh: global mesh providing ``nearest_node`` and slice indexing
    :param indices: ((start, ...), (stop, ...)) index tuples of the domain
    :param halos: ((low, ...), (high, ...)) halo widths of the domain
    :param bounds: ((low, ...), (high, ...)) coordinate bounds of the field
    :return: (halos, indices) tuples with the clipped values
    """
    # Work on mutable copies; the inputs are tuples of tuples.
    halos_ = list(list(x) for x in halos)
    indices_ = list(list(x) for x in indices)
    # Coordinate bounds of the region currently selected by *indices*.
    test_bounds = mesh[create_mask_from_indices(indices)].bounds
    for i, b in enumerate(bounds[0]):
        lb = list(test_bounds[0])
        hb = list(test_bounds[1])
        if b >= test_bounds[0][i]:
            halos_[0][i] = 0  # no lower neighbor!
            lb[i] = bounds[0][i]
            indices_[0][i] = mesh.nearest_node(lb)[0][i]
        if bounds[1][i] <= test_bounds[1][i]:
            halos_[1][i] = 0  # no higher neighbor!
            hb[i] = bounds[1][i]
            # +1 because the stop index is exclusive when sliced.
            indices_[1][i] = mesh.nearest_node(hb)[0][i] + 1
    halos = tuple(tuple(x) for x in halos_)
    indices = tuple(tuple(x) for x in indices_)
    return halos, indices
|
def minimum_pitch(self):
    """Return the smallest pitch between two neighbouring mesh nodes over all
    directions of the mesh.

    :return: minimal node spacing across every dimension
    """
    # self.pitch yields one array of spacings per dimension; take the
    # global minimum over all of them.
    return min(min(axis_pitch) for axis_pitch in self.pitch)
:return: Minimal pitch in each direction. | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/mesh/rectilinear.py#L154-L165 | null | class RectilinearMesh(Mesh):
interpolator = RegularGridInterpolator
def __init__(self, axes, axes_names=('x', 'y', 'z'), unit='m'):
""" RectilinearMesh
:param axes: Values of axis nodes as tuple of 1D np.arrays.
:param axes_names: Coordinate system axes names.
:param unit: Unit of mesh values.
"""
bounds = calculate_bounds(axes)
center = calculate_center(bounds)
shape = calculate_shape(axes)
self.__axes = axes
self.__shape = shape
Mesh.__init__(self, bounds, axes_names=axes_names, unit=unit)
self.__center_index = self.nearest_node(center)[0]
def __getitem__(self, item):
# item umpopeln damit tuple of slice passt!
new_axes = []
# This only works when len(item) equals the dimension of the mesh and will not work for None!
for i, x in enumerate(item):
new_axes.append(self.axes[i][x])
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def copy(self):
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def shift(self, offset):
# Update bounds!
low = np.array(self.bounds[0])
high = np.array(self.bounds[1])
tmp = np.array(offset)
self._bounds = (tuple(low+tmp), tuple(high+tmp))
assert len(offset) == len(self.axes)
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
for i, d in enumerate(offset):
new_axes[i] += d
self.__axes = tuple(new_axes)
return self
@property
def pitch(self):
dimension = self._dimension # len(self._axes)
pitch = [0.] * dimension
for dim in range(dimension):
axis_len = len(self.__axes[dim])
# create empty numpy array
coordinates = np.zeros(axis_len-1)
for idx in range(axis_len-1):
coordinates[idx] = (self.__axes[dim][idx+1]-self.__axes[dim][idx])
pitch[dim] = coordinates.copy()
return tuple(pitch)
@property
def axes(self):
return self.__axes
@property
def shape(self):
return self.__shape
@property
def center_index(self):
return self.__center_index
@property
def nearest_node(self, position):
idx = []
point = []
for i in range(len(self.axes)):
if position[i] < self.bounds[0][i] or position[i] > self.bounds[1][i]:
raise ValueError('The given position is outside the mesh bounds!')
tmp = (np.abs(self.axes[i]-position[i])).argmin()
idx.append(int(tmp))
point.append(self.axes[i][tmp])
return tuple(idx), tuple(point), np.linalg.norm(np.asarray(position)-np.asarray(point))
def surrounding_nodes(self, position):
""" Returns nearest node indices and direction of opposite node.
:param position: Position inside the mesh to search nearest node for as (x,y,z)
:return: Nearest node indices and direction of opposite node.
"""
n_node_index, n_node_position, n_node_error = self.nearest_node(position)
if n_node_error == 0.0:
index_mod = []
for i in range(len(n_node_index)):
new_point = np.asarray(n_node_position)
new_point[i] += 1.e-5*np.abs(new_point[i])
try:
self.nearest_node(tuple(new_point))
index_mod.append(-1)
except ValueError:
index_mod.append(1)
else:
# Check if node_position is larger or smaller in resp. axes than position
index_mod = []
for i in range(len(n_node_index)):
if n_node_position[i] > position[i]:
index_mod.append(-1)
else:
index_mod.append(1)
return tuple(n_node_index), tuple(index_mod)
|
cstatz/maui | maui/mesh/rectilinear.py | RectilinearMesh.surrounding_nodes | python | def surrounding_nodes(self, position):
    # Locate the nearest mesh node; n_node_error is the distance to it.
    n_node_index, n_node_position, n_node_error = self.nearest_node(position)
    if n_node_error == 0.0:
        # Position coincides exactly with a node: probe a point shifted by a
        # tiny relative amount along each axis to determine the direction value.
        # NOTE(review): sign convention (probe stays inside mesh -> -1,
        # probe leaves mesh -> 1) taken as-is; confirm against callers.
        index_mod = []
        for i in range(len(n_node_index)):
            new_point = np.asarray(n_node_position)
            new_point[i] += 1.e-5*np.abs(new_point[i])
            try:
                self.nearest_node(tuple(new_point))
                index_mod.append(-1)
            except ValueError:
                # Shifted point fell outside the mesh bounds.
                index_mod.append(1)
    else:
        # Check if node_position is larger or smaller in resp. axes than position
        index_mod = []
        for i in range(len(n_node_index)):
            if n_node_position[i] > position[i]:
                index_mod.append(-1)
            else:
                index_mod.append(1)
return tuple(n_node_index), tuple(index_mod) | Returns nearest node indices and direction of opposite node.
:param position: Position inside the mesh to search nearest node for as (x,y,z)
:return: Nearest node indices and direction of opposite node. | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/mesh/rectilinear.py#L181-L216 | [
"def nearest_node(self, position):\n\n idx = []\n point = []\n for i in range(len(self.axes)):\n if position[i] < self.bounds[0][i] or position[i] > self.bounds[1][i]:\n raise ValueError('The given position is outside the mesh bounds!')\n\n tmp = (np.abs(self.axes[i]-position[i])).argmin()\n idx.append(int(tmp))\n point.append(self.axes[i][tmp])\n\n return tuple(idx), tuple(point), np.linalg.norm(np.asarray(position)-np.asarray(point))\n"
] | class RectilinearMesh(Mesh):
interpolator = RegularGridInterpolator
def __init__(self, axes, axes_names=('x', 'y', 'z'), unit='m'):
""" RectilinearMesh
:param axes: Values of axis nodes as tuple of 1D np.arrays.
:param axes_names: Coordinate system axes names.
:param unit: Unit of mesh values.
"""
bounds = calculate_bounds(axes)
center = calculate_center(bounds)
shape = calculate_shape(axes)
self.__axes = axes
self.__shape = shape
Mesh.__init__(self, bounds, axes_names=axes_names, unit=unit)
self.__center_index = self.nearest_node(center)[0]
def __getitem__(self, item):
# item umpopeln damit tuple of slice passt!
new_axes = []
# This only works when len(item) equals the dimension of the mesh and will not work for None!
for i, x in enumerate(item):
new_axes.append(self.axes[i][x])
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def copy(self):
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
return RectilinearMesh(tuple(new_axes), self.axes_names, self.unit)
def shift(self, offset):
# Update bounds!
low = np.array(self.bounds[0])
high = np.array(self.bounds[1])
tmp = np.array(offset)
self._bounds = (tuple(low+tmp), tuple(high+tmp))
assert len(offset) == len(self.axes)
new_axes = []
for axe in self.axes:
new_axes.append(axe.copy())
for i, d in enumerate(offset):
new_axes[i] += d
self.__axes = tuple(new_axes)
return self
@property
def pitch(self):
dimension = self._dimension # len(self._axes)
pitch = [0.] * dimension
for dim in range(dimension):
axis_len = len(self.__axes[dim])
# create empty numpy array
coordinates = np.zeros(axis_len-1)
for idx in range(axis_len-1):
coordinates[idx] = (self.__axes[dim][idx+1]-self.__axes[dim][idx])
pitch[dim] = coordinates.copy()
return tuple(pitch)
@property
def axes(self):
return self.__axes
@property
def shape(self):
return self.__shape
@property
def center_index(self):
return self.__center_index
@property
def minimum_pitch(self):
""" Returns the minimal pitch between two neighboring nodes of the mesh in each direction.
:return: Minimal pitch in each direction.
"""
pitch = self.pitch
minimal_pitch = []
for p in pitch:
minimal_pitch.append(min(p))
return min(minimal_pitch)
def nearest_node(self, position):
idx = []
point = []
for i in range(len(self.axes)):
if position[i] < self.bounds[0][i] or position[i] > self.bounds[1][i]:
raise ValueError('The given position is outside the mesh bounds!')
tmp = (np.abs(self.axes[i]-position[i])).argmin()
idx.append(int(tmp))
point.append(self.axes[i][tmp])
return tuple(idx), tuple(point), np.linalg.norm(np.asarray(position)-np.asarray(point))
|
def __rmath(self, f, x):
    """Apply operator *f* with *x* as the left-hand operand and this field's
    per-domain data as the right-hand operand.

    :param f: operator function (operator.add/sub/mul/...)
    :param x: scalar left operand; only int, long, float or complex accepted
    :return: dict shaped like ``field.d`` holding the per-domain results
    :raises ValueError: when *x* is not a plain scalar
    """
    if not isinstance(x, (int, long, float, complex)):
        raise ValueError('Cannot execute reverse operator, only (int, float, complex) as first operand possible')
    result = {}
    for key in self.__d:
        result[key] = f(x, self.__d[key])
    return result
:return: dictionary (same shape as field.d) with result of operation | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/field.py#L337-L349 | null | class Field(object):
def __init__(self, partition, name, unit, rank, bounds=None, distribute=True):
""" Field prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Interger, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__rank = rank
self.__name = name
self.__unit = unit
if isinstance(partition, Mesh):
if distribute:
partition = context.create_partition(partition, bounds)
else:
from maui.backend.serial.partition import Partition as SerialPartition
partition = SerialPartition(partition, tuple([1 for _ in range(partition.dimension)]), tuple(0 for _ in range(partition.dimension)))
elif bounds is not None:
partition = partition.copy(bounds)
self.__partition = partition
self.__bounds = self.partition.bounds
#create mask to get Magic Members work with field with bounds not equal mesh bounds
self.__start_index = self.__partition.mesh.nearest_node(self.__bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(self.__bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
self.__domain_data = context.setup_domain_data(partition, rank)
self.__d = self.__domain_data.d # Dict of low level data stores
self.__interpolator = dict()
for key in self.__d.keys():
# TODO: Works only for scalar field, should work for vector and tensor also!
self.__interpolator[key] = list()
for i in range(self.partition.mesh.dimension**rank):
if rank == 0:
data = self.__d[key]
else:
data = self.__d[key][i,:]
#mesh = self.partition.domains[key].mesh
#x, y, z = meshgrid(mesh.axes[0], mesh.axes[1], mesh.axes[2])
#points = asarray((x.ravel(order='F'), y.ravel(order='F'), z.ravel(order='F')))
self.__interpolator[key].append(self.partition.domains[key].mesh.interpolator(self.partition.domains[key].mesh.axes, data))
#self.__interpolator[key].append(self.partition.domains[key].mesh.interpolator(points.T, data.ravel(order='F')))
def enlarge(self, add):
pass
def __getitem__(self, index):
return self.__domain_data[index]
def __setitem__(self, index, data):
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__rank
@property
def name(self):
return self.__name
@property
def unit(self):
return self.__unit
def get_value_by_point(self, point):
tmp = list()
for i in range(self.partition.mesh.dimension**self.rank):
for key in self.__interpolator.keys():
try:
tmp.append(self.__interpolator[key][i](point))
except ValueError:
warnings.warn("The given point is out of mesh bounds!")
if len(tmp) == self.partition.mesh.dimension**self.rank:
return tmp
else:
return None
def cross_mesh_field_evaluation(self, target_field):
#if target_field.rank != self.rank:
# raise ValueError("Rank of target field should by the same as host field!")
if len(target_field.d.keys()) <= len(self.__interpolator.keys()):
key_list = target_field.d.keys()
else:
key_list = self.__interpolator.keys()
for i in range(self.partition.mesh.dimension**self.rank):
for key in key_list:
mesh = target_field.partition.domains[key].mesh
x, y, z = meshgrid(mesh.axes[0], mesh.axes[1], mesh.axes[2], indexing='ij')
points = asarray((x.ravel(order='F'), y.ravel(order='F'), z.ravel(order='F')))
if self.rank == 0:
try:
target_field.d[key].ravel(order='F')[:] = self.__interpolator[key][i](points.T)[:]
except ValueError:
warnings.warn("One of the given points is out of mesh bounds!")
else:
try:
target_field.d[key][i, :] = self.__interpolator[key][i](points.T).reshape(target_field.d[key].shape[1:], order='F')[:]
except ValueError:
warnings.warn("One of the given points is out of mesh bounds!")
@property
def data(self):
""" Enhanced data property.
:return: Returns the associated DataPartition
"""
return self.__domain_data
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__d
@d.setter
def d(self, ddict):
self.__d = ddict
def sync(self):
self.__domain_data.sync()
def __add__(self, x):
'''Addition with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.add, x)
def __radd__(self, x):
'''reverse Addition with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.__d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
'''Subtraction with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.sub, x)
def __rsub__(self, x):
'''reverse subtraction with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.__d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
'''Multiplication with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.mul, x)
def __rmul__(self, x):
'''reverse multiplication with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.__d[i] = self.__mul__(x)[i]
return self
def __div__(self, x):
'''Division with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.div, x)
def __rdiv__(self, x):
'''reverse Division with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.__d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__rdiv__(x)
def __itruediv__(self, x):
return self.__idiv__(x)
def __pow__(self, x):
'''Power with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.pow, x)
def __rpow__(self, x):
'''reverse Power with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.__d[i][:] = self.__pow__(x)[i][:]
return self
def __mod__(self, x):
'''Modulo with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.mod, x)
def __rmod__(self, x):
'''reverse modulo with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.__d[i][:] = self.__mod__(x)[i][:]
return self
    def __indices(self, slice_partition, slice_mask):
        """Translate a domain's global slice into indices local to this
        field's bounding mask.

        Used when combining a bounded Field with a plain ndarray:
        *slice_partition* is the domain's slice in global mesh coordinates,
        *slice_mask* the field's own bounding mask.  A ``None`` start/stop
        means the domain does not extend past the mask edge on that side.

        :param slice_partition: tuple of slices of the domain (global coords)
        :param slice_mask: tuple of slices bounding the field (global coords)
        :return: tuple of slices relative to the mask origin
        """
        #function to generate Indices for operations with Array and Field with bounds different from mesh bounds
        ind=[]
        for j in xrange(len(slice_partition)):
            if slice_partition[j].start <= slice_mask[j].start:
                start = None
            else:
                start = slice_partition[j].start - slice_mask[j].start
            if slice_partition[j].stop >= slice_mask[j].stop:
                stop = None
            else:
                # Offset from the mask *start* on purpose: the result indexes
                # into the mask-local array.
                stop = slice_partition[j].stop - slice_mask[j].start
            ind.append(slice(start, stop, None))
        return tuple(ind)
#defines operator function
def __math(self, f, x):
"""operator function
:param f: operator.add/sub/mul... used operator
:param x: other object field should be add/sub... with
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
if isinstance(x, (int, long, float, complex)):
for i in self.__d: d[i] = f(self.__d[i], x)
#Operation with other Field or View (same shape, partitions, bounds)
elif isinstance(x, Field) or isinstance(x, View):
try:
for i in x.d: d[i] = f(self.__d[i], x.d[i])
except: raise ValueError('Fields have to be partitioned in the same way and have same shape to be add/sub/mul/div/pow/mod\nView has to have same bounds and origin mesh as Field.')
elif isinstance(x, ndarray):
#array has to be of the same Size as Field
try:
for i in self.d:
#generate Indices if Field has bounds different from mesh-bounds
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as Field for operation')
else: raise ValueError('Operators only available for Field and (Field, numpy.ndarray with same shape as whole Field, integer, float, complex, View).')
return d
def __array_op(self, f, x, axis):
"""operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
d = {}
#x is a vector (only numpy ndarray)
if isinstance(axis, int) and isinstance(x, ndarray):
if len(self.__partition.mesh.bounds[0]) == 3:
try:
for i in self.__d:
try:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
except:
raise ValueError("Indices geht nicht.")
if axis == 0:
d[i] = f(self.__d[i], x[ind[0]][:, newaxis, newaxis])
elif axis == 1:
d[i] = f(self.__d[i], x[ind[1]][:, newaxis])
elif axis == 2:
d[i] = f(self.__d[i], x[ind[2]])
else:
raise ValueError('"axis" can only have value 0, 1 or 2 .')
self.__d[i][:] = d[i][:]
except:
raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
elif len(self.__partition.mesh.bounds[0]) == 2:
try:
for i in self.__d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == 0:
d[i] = f(self.__d[i], x[ind[0]][:, newaxis])
elif axis == 1:
d[i] = f(self.__d[i], x[ind[1]][:])
else:
raise ValueError('"axis" can only have value 0 or 2 .')
self.__d[i][:] = d[i][:]
except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
#x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
elif len(axis) == 2:
#operation for 2D-arrays
if isinstance(x, ndarray):
try:
for i in self.__d:
ind = self.__indices(self.partition.meta_data[i], self.__mask)
if axis == (0, 1) or axis == (1, 0):
d[i] = f(self.__d[i], x[ind[0], ind[1]][:, :, newaxis])
elif axis == (1, 2) or axis == (2, 1):
d[i] = f(self.__d[i], x[ind[1], ind[2]])
elif axis == (0, 2) or axis == (2, 0):
d[i] = f(self.__d[i], x[ind[0], ind[2]][:, newaxis, :])
else:
raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
self.__d[i][:] = d[i][:]
except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
#operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
elif isinstance(x, Field) or isinstance(x, View):
if axis == (0, 1) or axis == (1, 0):
try:
for i in self.__d: d[i] = f(self.__d[i], x.d[(i[0],i[1])][:, :, newaxis])
except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
elif axis == (1, 2) or axis == (2, 1):
try:
for i in self.__d: d[i] = f(self.__d[i], x.d[(i[1],i[2])])
except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
elif axis == (0, 2) or axis == (2, 0):
try:
for i in self.__d: d[i] = f(self.__d[i], x.d[(i[0],i[2])][:, newaxis, :])
except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d
def add(self, x, axis):
"""Function to add 3D field with vector or 2D array (type = numpy.ndarray or 2D Field) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.add, x, axis)
def sub(self, x, axis):
"""Function to sub vector or 2D array (type = numpy.ndarray or 2D Field or View) from 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.sub, x, axis)
def mul(self, x, axis):
"""Function to mul vector or 2D array (type = numpy.ndarray or 2D Field or View) with 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.mul, x, axis)
def div(self, x, axis):
"""Function to div 3D field by vector or 2D array (type = numpy.ndarray or 2D Field or View) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.div, x, axis)
def mod(self, x, axis):
"""Function to modulo 3D field by vector or 2D array (type = numpy.ndarray or 2D Field or View) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.mod, x, axis)
def pow(self, x, axis):
"""Function have a vector or 2D array (type = numpy.ndarray or 2D Field or View) as power of 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.pow, x, axis)
|
cstatz/maui | maui/field/field.py | Field.__array_op | python | def __array_op(self, f, x, axis):
        # Reject anything that is not an ndarray, Field or View up front.
        if isinstance(x, ndarray) == False and isinstance(x, Field) == False and isinstance(x, View) == False:
            raise ValueError('first argument has to be an array of dimension 1 or 2 or an Field or an View of dimension 2')
        d = {}
        #x is a vector (only numpy ndarray)
        if isinstance(axis, int) and isinstance(x, ndarray):
            # 3D field (op) 1D vector: broadcast along the other two axes.
            if len(self.__partition.mesh.bounds[0]) == 3:
                try:
                    for i in self.__d:
                        try:
                            # Domain slice relative to the field's bounding mask.
                            ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        except:
                            raise ValueError("Indices geht nicht.")
                        if axis == 0:
                            d[i] = f(self.__d[i], x[ind[0]][:, newaxis, newaxis])
                        elif axis == 1:
                            d[i] = f(self.__d[i], x[ind[1]][:, newaxis])
                        elif axis == 2:
                            d[i] = f(self.__d[i], x[ind[2]])
                        else:
                            raise ValueError('"axis" can only have value 0, 1 or 2 .')
                        # Result is also written back into the field in place.
                        self.__d[i][:] = d[i][:]
                except:
                    raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
            # 2D field (op) 1D vector.
            elif len(self.__partition.mesh.bounds[0]) == 2:
                try:
                    for i in self.__d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == 0:
                            d[i] = f(self.__d[i], x[ind[0]][:, newaxis])
                        elif axis == 1:
                            d[i] = f(self.__d[i], x[ind[1]][:])
                        else:
                            raise ValueError('"axis" can only have value 0 or 2 .')
                        self.__d[i][:] = d[i][:]
                except: raise ValueError('Vector does not have same length as Field along axis %d.' %axis)
        #x is a plane (2D-numpy.ndarray or 2D field or View with same partitions, shape and bounds in plane as 3D field)
        elif len(axis) == 2:
            #operation for 2D-arrays
            if isinstance(x, ndarray):
                try:
                    for i in self.__d:
                        ind = self.__indices(self.partition.meta_data[i], self.__mask)
                        if axis == (0, 1) or axis == (1, 0):
                            d[i] = f(self.__d[i], x[ind[0], ind[1]][:, :, newaxis])
                        elif axis == (1, 2) or axis == (2, 1):
                            d[i] = f(self.__d[i], x[ind[1], ind[2]])
                        elif axis == (0, 2) or axis == (2, 0):
                            d[i] = f(self.__d[i], x[ind[0], ind[2]][:, newaxis, :])
                        else:
                            raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
                        self.__d[i][:] = d[i][:]
                except: raise ValueError('2D-Array does not fit to plane %s of Field' %(axis,))
            #operation for 2D Fields or View (Field from same origin mesh but bounds like View has)
            # NOTE(review): unlike the ndarray branches, these branches do not
            # write the result back into self.__d in place; confirm intended.
            elif isinstance(x, Field) or isinstance(x, View):
                if axis == (0, 1) or axis == (1, 0):
                    try:
                        for i in self.__d: d[i] = f(self.__d[i], x.d[(i[0],i[1])][:, :, newaxis])
                    except: raise ValueError('2D-Field/-View does not fit to field in xy-plane (maybe whole shape or partitions does not fit)')
                elif axis == (1, 2) or axis == (2, 1):
                    try:
                        for i in self.__d: d[i] = f(self.__d[i], x.d[(i[1],i[2])])
                    except: raise ValueError('2D-Field/-View does not fit to field in yz-plane (maybe whole shape or partitions does not fit)')
                elif axis == (0, 2) or axis == (2, 0):
                    try:
                        for i in self.__d: d[i] = f(self.__d[i], x.d[(i[0],i[2])][:, newaxis, :])
                    except: raise ValueError('2D-Field/-View does not fit to field in xz-plane (maybe whole shape or partitions does not fit)')
                else: raise ValueError('Axis-tuple can only contain 0 (x-axis), 1 (y-axis) and 2 (z-axis).')
            else: raise ValueError('x has to be an Field, View or numpy.ndarray with 2 dimensions (or an 1D numpy.ndarray (vector))')
        else: raise ValueError('Argument "axis" has to be an integer (for vector) or tuple of length 2 (for 2D array or field)')
return d | operation for 3D Field with planes or vector (type = numpy.ndarray) or 2D Field with vector (numpy.ndarray)
:param f: operator function
:param x: array(1D, 2D) or field (2D) or View (2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d) | train | https://github.com/cstatz/maui/blob/db99986e93699ee20c5cffdd5b4ee446f8607c5d/maui/field/field.py#L351-L433 | [
"def __indices(self, slice_partition, slice_mask):\n #function to generate Indices for operations with Array and Field with bounds different from mesh bounds\n ind=[]\n\n for j in xrange(len(slice_partition)):\n if slice_partition[j].start <= slice_mask[j].start:\n start = None\n else:\n start = slice_partition[j].start - slice_mask[j].start\n if slice_partition[j].stop >= slice_mask[j].stop:\n stop = None\n else:\n stop = slice_partition[j].stop - slice_mask[j].start\n ind.append(slice(start, stop, None))\n return tuple(ind)\n"
] | class Field(object):
def __init__(self, partition, name, unit, rank, bounds=None, distribute=True):
""" Field prototype class.
:param partition: Partition or Mesh, coordinate space associated with the field.
:param name: String, unique name of the field/variable.
:param unit: String, physical unit associated with the field.
:param rank: Interger, rank of the field.
:param interpolation: Interpolator, class to obtain value from the field by coordinate.
:param bounds: 2-Tuple of Tuples, bounds of the field inside the mesh.
"""
self.__rank = rank
self.__name = name
self.__unit = unit
if isinstance(partition, Mesh):
if distribute:
partition = context.create_partition(partition, bounds)
else:
from maui.backend.serial.partition import Partition as SerialPartition
partition = SerialPartition(partition, tuple([1 for _ in range(partition.dimension)]), tuple(0 for _ in range(partition.dimension)))
elif bounds is not None:
partition = partition.copy(bounds)
self.__partition = partition
self.__bounds = self.partition.bounds
#create mask to get Magic Members work with field with bounds not equal mesh bounds
self.__start_index = self.__partition.mesh.nearest_node(self.__bounds[0])[0]
self.__stop_index = self.__partition.mesh.nearest_node(self.__bounds[1])[0]
tmp = list(self.__stop_index)
for i in range(len(tmp)):
tmp[i] += 1
self.__stop_index = tuple(tmp)
self.__mask = create_mask_from_indices((self.__start_index, self.__stop_index))
self.__domain_data = context.setup_domain_data(partition, rank)
self.__d = self.__domain_data.d # Dict of low level data stores
self.__interpolator = dict()
for key in self.__d.keys():
# TODO: Works only for scalar field, should work for vector and tensor also!
self.__interpolator[key] = list()
for i in range(self.partition.mesh.dimension**rank):
if rank == 0:
data = self.__d[key]
else:
data = self.__d[key][i,:]
#mesh = self.partition.domains[key].mesh
#x, y, z = meshgrid(mesh.axes[0], mesh.axes[1], mesh.axes[2])
#points = asarray((x.ravel(order='F'), y.ravel(order='F'), z.ravel(order='F')))
self.__interpolator[key].append(self.partition.domains[key].mesh.interpolator(self.partition.domains[key].mesh.axes, data))
#self.__interpolator[key].append(self.partition.domains[key].mesh.interpolator(points.T, data.ravel(order='F')))
def enlarge(self, add):
pass
def __getitem__(self, index):
return self.__domain_data[index]
def __setitem__(self, index, data):
self.__domain_data[index] = data
@property
def bounds(self):
return self.__bounds
@property
def partition(self):
return self.__partition
@property
def rank(self):
return self.__rank
@property
def name(self):
return self.__name
@property
def unit(self):
return self.__unit
def get_value_by_point(self, point):
tmp = list()
for i in range(self.partition.mesh.dimension**self.rank):
for key in self.__interpolator.keys():
try:
tmp.append(self.__interpolator[key][i](point))
except ValueError:
warnings.warn("The given point is out of mesh bounds!")
if len(tmp) == self.partition.mesh.dimension**self.rank:
return tmp
else:
return None
def cross_mesh_field_evaluation(self, target_field):
#if target_field.rank != self.rank:
# raise ValueError("Rank of target field should by the same as host field!")
if len(target_field.d.keys()) <= len(self.__interpolator.keys()):
key_list = target_field.d.keys()
else:
key_list = self.__interpolator.keys()
for i in range(self.partition.mesh.dimension**self.rank):
for key in key_list:
mesh = target_field.partition.domains[key].mesh
x, y, z = meshgrid(mesh.axes[0], mesh.axes[1], mesh.axes[2], indexing='ij')
points = asarray((x.ravel(order='F'), y.ravel(order='F'), z.ravel(order='F')))
if self.rank == 0:
try:
target_field.d[key].ravel(order='F')[:] = self.__interpolator[key][i](points.T)[:]
except ValueError:
warnings.warn("One of the given points is out of mesh bounds!")
else:
try:
target_field.d[key][i, :] = self.__interpolator[key][i](points.T).reshape(target_field.d[key].shape[1:], order='F')[:]
except ValueError:
warnings.warn("One of the given points is out of mesh bounds!")
@property
def data(self):
""" Enhanced data property.
:return: Returns the associated DataPartition
"""
return self.__domain_data
@property
def d(self):
""" Primitive data property.
:return: dict() of numpy.ndarrays with the keys of the dict being the coordinate of the corresponding Domain
"""
return self.__d
@d.setter
def d(self, ddict):
self.__d = ddict
def sync(self):
self.__domain_data.sync()
def __add__(self, x):
'''Addition with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.add, x)
def __radd__(self, x):
'''reverse Addition with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.add, x)
def __iadd__(self, x):
for i in self.d: self.__d[i][:] = self.__add__(x)[i][:]
return self
def __sub__(self, x):
'''Subtraction with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.sub, x)
def __rsub__(self, x):
'''reverse subtraction with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.sub, x)
def __isub__(self, x):
for i in self.d: self.__d[i][:] = self.__sub__(x)[i][:]
return self
def __mul__(self, x):
'''Multiplication with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.mul, x)
def __rmul__(self, x):
'''reverse multiplication with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.mul, x)
def __imul__(self, x):
for i in self.d: self.__d[i] = self.__mul__(x)[i]
return self
def __div__(self, x):
'''Division with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.div, x)
def __rdiv__(self, x):
'''reverse Division with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.div, x)
def __idiv__(self, x):
for i in self.d: self.__d[i][:] = self.__div__(x)[i][:]
return self
def __truediv__(self, x):
return self.__div__(x)
def __rtruediv__(self, x):
return self.__rdiv__(x)
def __itruediv__(self, x):
return self.__idiv__(x)
def __pow__(self, x):
'''Power with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.pow, x)
def __rpow__(self, x):
'''reverse Power with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.pow, x)
def __ipow__(self, x):
for i in self.d: self.__d[i][:] = self.__pow__(x)[i][:]
return self
def __mod__(self, x):
'''Modulo with numpy.ndarray or Field (same shape and Partitions) or single number
:return: dict (same shape as field)
'''
return self.__math(operator.mod, x)
def __rmod__(self, x):
'''reverse modulo with single number (does NOT work with np.ndarray!!!)
:return: dict (same shape as field)
'''
return self.__rmath(operator.mod, x)
def __imod__(self, x):
for i in self.d: self.__d[i][:] = self.__mod__(x)[i][:]
return self
def __indices(self, slice_partition, slice_mask):
#function to generate Indices for operations with Array and Field with bounds different from mesh bounds
ind=[]
for j in xrange(len(slice_partition)):
if slice_partition[j].start <= slice_mask[j].start:
start = None
else:
start = slice_partition[j].start - slice_mask[j].start
if slice_partition[j].stop >= slice_mask[j].stop:
stop = None
else:
stop = slice_partition[j].stop - slice_mask[j].start
ind.append(slice(start, stop, None))
return tuple(ind)
#defines operator function
def __math(self, f, x):
"""operator function
:param f: operator.add/sub/mul... used operator
:param x: other object field should be add/sub... with
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
if isinstance(x, (int, long, float, complex)):
for i in self.__d: d[i] = f(self.__d[i], x)
#Operation with other Field or View (same shape, partitions, bounds)
elif isinstance(x, Field) or isinstance(x, View):
try:
for i in x.d: d[i] = f(self.__d[i], x.d[i])
except: raise ValueError('Fields have to be partitioned in the same way and have same shape to be add/sub/mul/div/pow/mod\nView has to have same bounds and origin mesh as Field.')
elif isinstance(x, ndarray):
#array has to be of the same Size as Field
try:
for i in self.d:
#generate Indices if Field has bounds different from mesh-bounds
ind = self.__indices(self.__partition.meta_data[i], self.__mask)
d[i] = f(self.d[i], x[ind])
return d
except: raise ValueError('Array has to have same shape as Field for operation')
else: raise ValueError('Operators only available for Field and (Field, numpy.ndarray with same shape as whole Field, integer, float, complex, View).')
return d
def __rmath(self, f, x):
"""reverse operator function
:param f: operator.add/sub/mul... used operator
:param x: other object field should be add/sub... with
:return: dictionary (same shape as field.d) with result of operation
"""
d = {}
if isinstance(x, (int, long, float, complex)):
for i in self.__d: d[i] = f(x , self.__d[i])
else: raise ValueError('Cannot execute reverse operator, only (int, float, complex) as first operand possible')
return d
def add(self, x, axis):
"""Function to add 3D field with vector or 2D array (type = numpy.ndarray or 2D Field) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.add, x, axis)
def sub(self, x, axis):
"""Function to sub vector or 2D array (type = numpy.ndarray or 2D Field or View) from 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.sub, x, axis)
def mul(self, x, axis):
"""Function to mul vector or 2D array (type = numpy.ndarray or 2D Field or View) with 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.mul, x, axis)
def div(self, x, axis):
"""Function to div 3D field by vector or 2D array (type = numpy.ndarray or 2D Field or View) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.div, x, axis)
def mod(self, x, axis):
"""Function to modulo 3D field by vector or 2D array (type = numpy.ndarray or 2D Field or View) or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.mod, x, axis)
def pow(self, x, axis):
"""Function have a vector or 2D array (type = numpy.ndarray or 2D Field or View) as power of 3D field or 2D Field with vector (type = numpy.ndarray)
:param x: array(1D, 2D) or field (2D) or View(2D)
:param axis: specifies axis, eg. axis = (1,2) plane lies in yz-plane, axis=0 vector along x axis
:return: dict with result of operation (same form as field.d)
"""
return self.__array_op(operator.pow, x, axis)
|
joealcorn/xbox | xbox/client.py | Client._get | python | def _get(self, url, **kw):
'''
Makes a GET request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.get(url, **kw)
self._raise_for_status(resp)
return resp | Makes a GET request, setting Authorization
header by default | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/client.py#L42-L54 | [
"def _raise_for_status(self, response):\n if response.status_code == 400:\n try:\n description = response.json()['description']\n except:\n description = 'Invalid request'\n raise InvalidRequest(description, response=response)\n",
"def get(self, url, **kwargs):\n \"\"\"Sends a GET request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return self.request('GET', url, **kwargs)\n"
] | class Client(object):
'''
Base API client object handling authentication
and making requests.
A global instance of this is instantiated on import,
all you have to do is call the :meth:`~xbox.Client.authenticate`
method.
:var bool authenticated: whether client is authed
'''
def __init__(self):
self.session = requests.session()
self.authenticated = False
def _raise_for_status(self, response):
if response.status_code == 400:
try:
description = response.json()['description']
except:
description = 'Invalid request'
raise InvalidRequest(description, response=response)
def _post(self, url, **kw):
'''
Makes a POST request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.post(url, **kw)
self._raise_for_status(resp)
return resp
def _post_json(self, url, data, **kw):
'''
Makes a POST request, setting Authorization
and Content-Type headers by default
'''
data = json.dumps(data)
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
kw['headers'] = headers
kw['data'] = data
return self._post(url, **kw)
def authenticate(self, login=None, password=None):
'''
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
'''
if login is None:
login = os.environ.get('MS_LOGIN')
if password is None:
password = os.environ.get('MS_PASSWD')
if not login or not password:
msg = (
'Authentication credentials required. Please refer to '
'http://xbox.readthedocs.org/en/latest/authentication.html'
)
raise AuthenticationException(msg)
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({
'client_id': '0000000048093EE3',
'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
'response_type': 'token',
'display': 'touch',
'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
'locale': 'en',
}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {
'login': login,
'passwd': password,
'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
'PPSX': 'Passpor',
'SI': 'Sign in',
'type': '11',
'NewUser': '1',
'LoginOptions': '1',
'i3': '36728',
'm1': '768',
'm2': '1184',
'm3': '0',
'i12': '1',
'i17': '0',
'i18': '__Login_Host|1',
}
resp = self.session.post(
login_post_url, data=post_data, allow_redirects=False,
)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg)
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://auth.xboxlive.com",
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": access_token,
}
}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://xboxlive.com",
"TokenType": "JWT",
"Properties": {
"UserTokens": [user_token],
"SandboxId": "RETAIL",
}
}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self
def __repr__(self):
if self.authenticated:
return '<xbox.Client: %s>' % self.login
else:
return '<xbox.Client: Unauthenticated>'
|
joealcorn/xbox | xbox/client.py | Client._post | python | def _post(self, url, **kw):
'''
Makes a POST request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.post(url, **kw)
self._raise_for_status(resp)
return resp | Makes a POST request, setting Authorization
header by default | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/client.py#L56-L66 | null | class Client(object):
'''
Base API client object handling authentication
and making requests.
A global instance of this is instantiated on import,
all you have to do is call the :meth:`~xbox.Client.authenticate`
method.
:var bool authenticated: whether client is authed
'''
def __init__(self):
self.session = requests.session()
self.authenticated = False
def _raise_for_status(self, response):
if response.status_code == 400:
try:
description = response.json()['description']
except:
description = 'Invalid request'
raise InvalidRequest(description, response=response)
def _get(self, url, **kw):
'''
Makes a GET request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.get(url, **kw)
self._raise_for_status(resp)
return resp
def _post_json(self, url, data, **kw):
'''
Makes a POST request, setting Authorization
and Content-Type headers by default
'''
data = json.dumps(data)
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
kw['headers'] = headers
kw['data'] = data
return self._post(url, **kw)
def authenticate(self, login=None, password=None):
'''
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
'''
if login is None:
login = os.environ.get('MS_LOGIN')
if password is None:
password = os.environ.get('MS_PASSWD')
if not login or not password:
msg = (
'Authentication credentials required. Please refer to '
'http://xbox.readthedocs.org/en/latest/authentication.html'
)
raise AuthenticationException(msg)
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({
'client_id': '0000000048093EE3',
'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
'response_type': 'token',
'display': 'touch',
'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
'locale': 'en',
}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {
'login': login,
'passwd': password,
'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
'PPSX': 'Passpor',
'SI': 'Sign in',
'type': '11',
'NewUser': '1',
'LoginOptions': '1',
'i3': '36728',
'm1': '768',
'm2': '1184',
'm3': '0',
'i12': '1',
'i17': '0',
'i18': '__Login_Host|1',
}
resp = self.session.post(
login_post_url, data=post_data, allow_redirects=False,
)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg)
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://auth.xboxlive.com",
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": access_token,
}
}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://xboxlive.com",
"TokenType": "JWT",
"Properties": {
"UserTokens": [user_token],
"SandboxId": "RETAIL",
}
}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self
def __repr__(self):
if self.authenticated:
return '<xbox.Client: %s>' % self.login
else:
return '<xbox.Client: Unauthenticated>'
|
joealcorn/xbox | xbox/client.py | Client._post_json | python | def _post_json(self, url, data, **kw):
'''
Makes a POST request, setting Authorization
and Content-Type headers by default
'''
data = json.dumps(data)
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
kw['headers'] = headers
kw['data'] = data
return self._post(url, **kw) | Makes a POST request, setting Authorization
and Content-Type headers by default | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/client.py#L68-L80 | null | class Client(object):
'''
Base API client object handling authentication
and making requests.
A global instance of this is instantiated on import,
all you have to do is call the :meth:`~xbox.Client.authenticate`
method.
:var bool authenticated: whether client is authed
'''
def __init__(self):
self.session = requests.session()
self.authenticated = False
def _raise_for_status(self, response):
if response.status_code == 400:
try:
description = response.json()['description']
except:
description = 'Invalid request'
raise InvalidRequest(description, response=response)
def _get(self, url, **kw):
'''
Makes a GET request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.get(url, **kw)
self._raise_for_status(resp)
return resp
def _post(self, url, **kw):
'''
Makes a POST request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.post(url, **kw)
self._raise_for_status(resp)
return resp
def authenticate(self, login=None, password=None):
'''
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
'''
if login is None:
login = os.environ.get('MS_LOGIN')
if password is None:
password = os.environ.get('MS_PASSWD')
if not login or not password:
msg = (
'Authentication credentials required. Please refer to '
'http://xbox.readthedocs.org/en/latest/authentication.html'
)
raise AuthenticationException(msg)
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({
'client_id': '0000000048093EE3',
'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
'response_type': 'token',
'display': 'touch',
'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
'locale': 'en',
}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {
'login': login,
'passwd': password,
'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
'PPSX': 'Passpor',
'SI': 'Sign in',
'type': '11',
'NewUser': '1',
'LoginOptions': '1',
'i3': '36728',
'm1': '768',
'm2': '1184',
'm3': '0',
'i12': '1',
'i17': '0',
'i18': '__Login_Host|1',
}
resp = self.session.post(
login_post_url, data=post_data, allow_redirects=False,
)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg)
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://auth.xboxlive.com",
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": access_token,
}
}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://xboxlive.com",
"TokenType": "JWT",
"Properties": {
"UserTokens": [user_token],
"SandboxId": "RETAIL",
}
}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self
def __repr__(self):
if self.authenticated:
return '<xbox.Client: %s>' % self.login
else:
return '<xbox.Client: Unauthenticated>'
|
joealcorn/xbox | xbox/client.py | Client.authenticate | python | def authenticate(self, login=None, password=None):
'''
Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client`
'''
if login is None:
login = os.environ.get('MS_LOGIN')
if password is None:
password = os.environ.get('MS_PASSWD')
if not login or not password:
msg = (
'Authentication credentials required. Please refer to '
'http://xbox.readthedocs.org/en/latest/authentication.html'
)
raise AuthenticationException(msg)
self.login = login
# firstly we have to GET the login page and extract
# certain data we need to include in our POST request.
# sadly the data is locked away in some javascript code
base_url = 'https://login.live.com/oauth20_authorize.srf?'
# if the query string is percent-encoded the server
# complains that client_id is missing
qs = unquote(urlencode({
'client_id': '0000000048093EE3',
'redirect_uri': 'https://login.live.com/oauth20_desktop.srf',
'response_type': 'token',
'display': 'touch',
'scope': 'service::user.auth.xboxlive.com::MBI_SSL',
'locale': 'en',
}))
resp = self.session.get(base_url + qs)
# python 3.x will error if this string is not a
# bytes-like object
url_re = b'urlPost:\\\'([A-Za-z0-9:\?_\-\.&/=]+)'
ppft_re = b'sFTTag:\\\'.*value="(.*)"/>'
login_post_url = re.search(url_re, resp.content).group(1)
post_data = {
'login': login,
'passwd': password,
'PPFT': re.search(ppft_re, resp.content).groups(1)[0],
'PPSX': 'Passpor',
'SI': 'Sign in',
'type': '11',
'NewUser': '1',
'LoginOptions': '1',
'i3': '36728',
'm1': '768',
'm2': '1184',
'm3': '0',
'i12': '1',
'i17': '0',
'i18': '__Login_Host|1',
}
resp = self.session.post(
login_post_url, data=post_data, allow_redirects=False,
)
if 'Location' not in resp.headers:
# we can only assume the login failed
msg = 'Could not log in with supplied credentials'
raise AuthenticationException(msg)
# the access token is included in fragment of the location header
location = resp.headers['Location']
parsed = urlparse(location)
fragment = parse_qs(parsed.fragment)
access_token = fragment['access_token'][0]
url = 'https://user.auth.xboxlive.com/user/authenticate'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://auth.xboxlive.com",
"TokenType": "JWT",
"Properties": {
"AuthMethod": "RPS",
"SiteName": "user.auth.xboxlive.com",
"RpsTicket": access_token,
}
}), headers={'Content-Type': 'application/json'})
json_data = resp.json()
user_token = json_data['Token']
uhs = json_data['DisplayClaims']['xui'][0]['uhs']
url = 'https://xsts.auth.xboxlive.com/xsts/authorize'
resp = self.session.post(url, data=json.dumps({
"RelyingParty": "http://xboxlive.com",
"TokenType": "JWT",
"Properties": {
"UserTokens": [user_token],
"SandboxId": "RETAIL",
}
}), headers={'Content-Type': 'application/json'})
response = resp.json()
self.AUTHORIZATION_HEADER = 'XBL3.0 x=%s;%s' % (uhs, response['Token'])
self.user_xid = response['DisplayClaims']['xui'][0]['xid']
self.authenticated = True
return self | Authenticated this client instance.
``login`` and ``password`` default to the environment
variables ``MS_LOGIN`` and ``MS_PASSWD`` respectively.
:param login: Email address associated with a microsoft account
:param password: Matching password
:raises: :class:`~xbox.exceptions.AuthenticationException`
:returns: Instance of :class:`~xbox.Client` | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/client.py#L82-L198 | [
"def get(self, url, **kwargs):\n \"\"\"Sends a GET request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n kwargs.setdefault('allow_redirects', True)\n return self.request('GET', url, **kwargs)\n",
"def post(self, url, data=None, **kwargs):\n \"\"\"Sends a POST request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n \"\"\"\n\n return self.request('POST', url, data=data, **kwargs)\n"
] | class Client(object):
'''
Base API client object handling authentication
and making requests.
A global instance of this is instantiated on import,
all you have to do is call the :meth:`~xbox.Client.authenticate`
method.
:var bool authenticated: whether client is authed
'''
def __init__(self):
self.session = requests.session()
self.authenticated = False
def _raise_for_status(self, response):
if response.status_code == 400:
try:
description = response.json()['description']
except:
description = 'Invalid request'
raise InvalidRequest(description, response=response)
def _get(self, url, **kw):
'''
Makes a GET request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.get(url, **kw)
self._raise_for_status(resp)
return resp
def _post(self, url, **kw):
'''
Makes a POST request, setting Authorization
header by default
'''
headers = kw.pop('headers', {})
headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)
kw['headers'] = headers
resp = self.session.post(url, **kw)
self._raise_for_status(resp)
return resp
def _post_json(self, url, data, **kw):
'''
Makes a POST request, setting Authorization
and Content-Type headers by default
'''
data = json.dumps(data)
headers = kw.pop('headers', {})
headers.setdefault('Content-Type', 'application/json')
headers.setdefault('Accept', 'application/json')
kw['headers'] = headers
kw['data'] = data
return self._post(url, **kw)
def __repr__(self):
if self.authenticated:
return '<xbox.Client: %s>' % self.login
else:
return '<xbox.Client: Unauthenticated>'
|
joealcorn/xbox | xbox/resource.py | GamerProfile.from_xuid | python | def from_xuid(cls, xuid):
'''
Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
try:
return cls._fetch(url)
except (GamertagNotFound, InvalidRequest):
# this endpoint seems to return 400 when the resource
# does not exist
raise GamertagNotFound('No such user: %s' % xuid) | Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/resource.py#L33-L51 | [
"def inner(*a, **kw):\n if not xbox.client.authenticated:\n xbox.client.authenticate()\n return func(*a, **kw)\n"
] | class GamerProfile(object):
'''
Represents an xbox live user.
:var string xuid: xuid of user
:var string gamertag: gamertag of user
:var string gamerscore: gamerscore of user
:var string gamerpic: url for gamerpic of user
'''
def __init__(self, xuid, settings, user_data):
self.xuid = xuid
self.raw_json = user_data
name_map = {
'Gamertag': 'gamertag',
'Gamerscore': 'gamerscore',
'PublicGamerpic': 'gamerpic',
}
for setting in settings:
if setting['id'] in name_map:
setattr(self, name_map[setting['id']], setting['value'])
@classmethod
@classmethod
def from_gamertag(cls, gamertag):
'''
Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
try:
return cls._fetch(url)
except GamertagNotFound:
raise GamertagNotFound('No such user: %s' % gamertag)
@classmethod
@authenticates
def _fetch(cls, base_url):
settings = [
'AppDisplayName',
'DisplayPic',
'Gamerscore',
'Gamertag',
'PublicGamerpic',
'XboxOneRep',
]
qs = '?settings=%s' % ','.join(settings)
headers = {'x-xbl-contract-version': 2}
resp = xbox.client._get(base_url + qs, headers=headers)
if resp.status_code == 404:
raise GamertagNotFound('No such user')
# example payload:
# {
# "profileUsers": [{
# "id": "2533274812246958",
# "hostId": null,
# "settings": [{
# "id": "AppDisplayName",
# "value": "JoeAlcorn"
# }, {
# "id": "DisplayPic",
# "value": "http://compass.xbox.com/assets/70/52/7052948b-c50d-4850-baff-abbcad07b631.jpg?n=004.jpg"
# }, {
# "id": "Gamerscore",
# "value": "22786"
# }, {
# "id": "Gamertag",
# "value": "JoeAlcorn"
# }, {
# "id": "PublicGamerpic",
# "value": "http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR"
# }, {
# "id": "XboxOneRep",
# "value": "GoodPlayer"
# }],
# "isSponsoredUser": false
# }]
# }
raw_json = resp.json()
user = raw_json['profileUsers'][0]
return cls(user['id'], user['settings'], raw_json)
def clips(self):
'''
Gets the latest clips made by this user
:returns: Iterator of :class:`~xbox.Clip` instances
'''
return Clip.latest_from_user(self)
def __repr__(self):
return '<xbox.resource.GamerProfile: %s (%s)>' % (
self.gamertag, self.xuid
)
|
joealcorn/xbox | xbox/resource.py | GamerProfile.from_gamertag | python | def from_gamertag(cls, gamertag):
'''
Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/gt(%s)/profile/settings' % gamertag
try:
return cls._fetch(url)
except GamertagNotFound:
raise GamertagNotFound('No such user: %s' % gamertag) | Instantiates an instance of ``GamerProfile`` from
a gamertag
:param gamertag: Gamertag to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/resource.py#L54-L69 | [
"def inner(*a, **kw):\n if not xbox.client.authenticated:\n xbox.client.authenticate()\n return func(*a, **kw)\n"
] | class GamerProfile(object):
'''
Represents an xbox live user.
:var string xuid: xuid of user
:var string gamertag: gamertag of user
:var string gamerscore: gamerscore of user
:var string gamerpic: url for gamerpic of user
'''
def __init__(self, xuid, settings, user_data):
self.xuid = xuid
self.raw_json = user_data
name_map = {
'Gamertag': 'gamertag',
'Gamerscore': 'gamerscore',
'PublicGamerpic': 'gamerpic',
}
for setting in settings:
if setting['id'] in name_map:
setattr(self, name_map[setting['id']], setting['value'])
@classmethod
def from_xuid(cls, xuid):
'''
Instantiates an instance of ``GamerProfile`` from
an xuid
:param xuid: Xuid to look up
:raises: :class:`~xbox.exceptions.GamertagNotFound`
:returns: :class:`~xbox.GamerProfile` instance
'''
url = 'https://profile.xboxlive.com/users/xuid(%s)/profile/settings' % xuid
try:
return cls._fetch(url)
except (GamertagNotFound, InvalidRequest):
# this endpoint seems to return 400 when the resource
# does not exist
raise GamertagNotFound('No such user: %s' % xuid)
@classmethod
@classmethod
@authenticates
def _fetch(cls, base_url):
settings = [
'AppDisplayName',
'DisplayPic',
'Gamerscore',
'Gamertag',
'PublicGamerpic',
'XboxOneRep',
]
qs = '?settings=%s' % ','.join(settings)
headers = {'x-xbl-contract-version': 2}
resp = xbox.client._get(base_url + qs, headers=headers)
if resp.status_code == 404:
raise GamertagNotFound('No such user')
# example payload:
# {
# "profileUsers": [{
# "id": "2533274812246958",
# "hostId": null,
# "settings": [{
# "id": "AppDisplayName",
# "value": "JoeAlcorn"
# }, {
# "id": "DisplayPic",
# "value": "http://compass.xbox.com/assets/70/52/7052948b-c50d-4850-baff-abbcad07b631.jpg?n=004.jpg"
# }, {
# "id": "Gamerscore",
# "value": "22786"
# }, {
# "id": "Gamertag",
# "value": "JoeAlcorn"
# }, {
# "id": "PublicGamerpic",
# "value": "http://images-eds.xboxlive.com/image?url=z951ykn43p4FqWbbFvR2Ec.8vbDhj8G2Xe7JngaTToBrrCmIEEXHC9UNrdJ6P7KIFXxmxGDtE9Vkd62rOpb7JcGvME9LzjeruYo3cC50qVYelz5LjucMJtB5xOqvr7WR"
# }, {
# "id": "XboxOneRep",
# "value": "GoodPlayer"
# }],
# "isSponsoredUser": false
# }]
# }
raw_json = resp.json()
user = raw_json['profileUsers'][0]
return cls(user['id'], user['settings'], raw_json)
def clips(self):
'''
Gets the latest clips made by this user
:returns: Iterator of :class:`~xbox.Clip` instances
'''
return Clip.latest_from_user(self)
def __repr__(self):
return '<xbox.resource.GamerProfile: %s (%s)>' % (
self.gamertag, self.xuid
)
|
joealcorn/xbox | xbox/resource.py | Clip.get | python | def get(cls, xuid, scid, clip_id):
'''
Gets a specific game clip
:param xuid: xuid of an xbox live user
:param scid: scid of a clip
:param clip_id: id of a clip
'''
url = (
'https://gameclipsmetadata.xboxlive.com/users'
'/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % {
'xuid': xuid,
'scid': scid,
'clip_id': clip_id,
}
)
resp = xbox.client._get(url)
# scid does not seem to matter when fetching clips,
# as long as it looks like a uuid it should be fine.
# perhaps we'll raise an exception in future
if resp.status_code == 404:
msg = 'Could not find clip: xuid=%s, scid=%s, clip_id=%s' % (
xuid, scid, clip_id,
)
raise ClipNotFound(msg)
data = resp.json()
# as we don't have the user object let's
# create a lazily evaluated proxy object
# that will fetch it only when required
user = UserProxy(xuid)
return cls(user, data['gameClip']) | Gets a specific game clip
:param xuid: xuid of an xbox live user
:param scid: scid of a clip
:param clip_id: id of a clip | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/resource.py#L201-L234 | [
"def _get(self, url, **kw):\n '''\n Makes a GET request, setting Authorization\n header by default\n '''\n headers = kw.pop('headers', {})\n headers.setdefault('Content-Type', 'application/json')\n headers.setdefault('Accept', 'application/json')\n headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)\n kw['headers'] = headers\n resp = self.session.get(url, **kw)\n self._raise_for_status(resp)\n return resp\n"
] | class Clip(object):
'''
Represents a single game clip.
:var user: User that made the clip
:var string clip_id: Unique id of the clip
:var string scid: Unique SCID of the clip
:var string duration: Duration, in seconds, of the clip
:var string name: Name of the clip. Can be ``''``
:var bool saved: Whether the user has saved the clip.
Clips that aren't saved eventually expire
:var string state:
:var string views: Number of views the clip has had
:var string rating: Clip rating
:var string ratings: Number of ratings the clip has received
:var string caption: User-defined clip caption
:var dict thumbnails: Thumbnail URLs for the clip
:var datetime recorded: Date and time clip was made
:var string media_url: Video clip URL
'''
def __init__(self, user, clip_data):
self.raw_json = clip_data
self.user = user
self.clip_id = clip_data['gameClipId']
self.scid = clip_data['scid']
self.duration = clip_data['durationInSeconds']
self.name = clip_data['clipName']
self.saved = clip_data['savedByUser']
self.state = clip_data['state']
self.views = clip_data['views']
self.rating = clip_data['rating']
self.ratings = clip_data['ratingCount']
self.caption = clip_data['userCaption']
self.thumbnails = DotNotationDict()
self.recorded = datetime.strptime(
clip_data['dateRecorded'], '%Y-%m-%dT%H:%M:%SZ'
)
# thumbnails and media_url may not yet exist
# if the state of the clip is PendingUpload
self.thumbnails.small = None
self.thumbnails.large = None
for thumb in clip_data['thumbnails']:
if thumb['thumbnailType'] == 'Small':
self.thumbnails.small = thumb['uri']
elif thumb['thumbnailType'] == 'Large':
self.thumbnails.large = thumb['uri']
self.media_url = None
for uri in clip_data['gameClipUris']:
if uri['uriType'] == 'Download':
self.media_url = uri['uri']
def __getstate__(self):
return (self.raw_json, self.user)
def __setstate__(self, data):
clip_data = data[0]
user = data[1]
self.__init__(user, clip_data)
@classmethod
@authenticates
@classmethod
@authenticates
def saved_from_user(cls, user, include_pending=False):
'''
Gets all clips 'saved' by a user.
:param user: :class:`~xbox.GamerProfile` instance
:param bool include_pending: whether to ignore clips that are not
yet uploaded. These clips will have thumbnails and media_url
set to ``None``
:returns: Iterator of :class:`~xbox.Clip` instances
'''
url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips/saved'
resp = xbox.client._get(url % user.xuid)
data = resp.json()
for clip in data['gameClips']:
if clip['state'] != 'PendingUpload' or include_pending:
yield cls(user, clip)
@classmethod
@authenticates
def latest_from_user(cls, user, include_pending=False):
'''
Gets all clips, saved and unsaved
:param user: :class:`~xbox.GamerProfile` instance
:param bool include_pending: whether to ignore clips that are not
yet uploaded. These clips will have thumbnails and media_url
set to ``None``
:returns: Iterator of :class:`~xbox.Clip` instances
'''
url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips'
resp = xbox.client._get(url % user.xuid)
data = resp.json()
for clip in data['gameClips']:
if clip['state'] != 'PendingUpload' or include_pending:
yield cls(user, clip)
|
joealcorn/xbox | xbox/resource.py | Clip.saved_from_user | python | def saved_from_user(cls, user, include_pending=False):
'''
Gets all clips 'saved' by a user.
:param user: :class:`~xbox.GamerProfile` instance
:param bool include_pending: whether to ignore clips that are not
yet uploaded. These clips will have thumbnails and media_url
set to ``None``
:returns: Iterator of :class:`~xbox.Clip` instances
'''
url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips/saved'
resp = xbox.client._get(url % user.xuid)
data = resp.json()
for clip in data['gameClips']:
if clip['state'] != 'PendingUpload' or include_pending:
yield cls(user, clip) | Gets all clips 'saved' by a user.
:param user: :class:`~xbox.GamerProfile` instance
:param bool include_pending: whether to ignore clips that are not
yet uploaded. These clips will have thumbnails and media_url
set to ``None``
:returns: Iterator of :class:`~xbox.Clip` instances | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/resource.py#L238-L254 | [
"def _get(self, url, **kw):\n '''\n Makes a GET request, setting Authorization\n header by default\n '''\n headers = kw.pop('headers', {})\n headers.setdefault('Content-Type', 'application/json')\n headers.setdefault('Accept', 'application/json')\n headers.setdefault('Authorization', self.AUTHORIZATION_HEADER)\n kw['headers'] = headers\n resp = self.session.get(url, **kw)\n self._raise_for_status(resp)\n return resp\n"
] | class Clip(object):
'''
Represents a single game clip.
:var user: User that made the clip
:var string clip_id: Unique id of the clip
:var string scid: Unique SCID of the clip
:var string duration: Duration, in seconds, of the clip
:var string name: Name of the clip. Can be ``''``
:var bool saved: Whether the user has saved the clip.
Clips that aren't saved eventually expire
:var string state:
:var string views: Number of views the clip has had
:var string rating: Clip rating
:var string ratings: Number of ratings the clip has received
:var string caption: User-defined clip caption
:var dict thumbnails: Thumbnail URLs for the clip
:var datetime recorded: Date and time clip was made
:var string media_url: Video clip URL
'''
def __init__(self, user, clip_data):
self.raw_json = clip_data
self.user = user
self.clip_id = clip_data['gameClipId']
self.scid = clip_data['scid']
self.duration = clip_data['durationInSeconds']
self.name = clip_data['clipName']
self.saved = clip_data['savedByUser']
self.state = clip_data['state']
self.views = clip_data['views']
self.rating = clip_data['rating']
self.ratings = clip_data['ratingCount']
self.caption = clip_data['userCaption']
self.thumbnails = DotNotationDict()
self.recorded = datetime.strptime(
clip_data['dateRecorded'], '%Y-%m-%dT%H:%M:%SZ'
)
# thumbnails and media_url may not yet exist
# if the state of the clip is PendingUpload
self.thumbnails.small = None
self.thumbnails.large = None
for thumb in clip_data['thumbnails']:
if thumb['thumbnailType'] == 'Small':
self.thumbnails.small = thumb['uri']
elif thumb['thumbnailType'] == 'Large':
self.thumbnails.large = thumb['uri']
self.media_url = None
for uri in clip_data['gameClipUris']:
if uri['uriType'] == 'Download':
self.media_url = uri['uri']
def __getstate__(self):
return (self.raw_json, self.user)
def __setstate__(self, data):
clip_data = data[0]
user = data[1]
self.__init__(user, clip_data)
@classmethod
@authenticates
def get(cls, xuid, scid, clip_id):
'''
Gets a specific game clip
:param xuid: xuid of an xbox live user
:param scid: scid of a clip
:param clip_id: id of a clip
'''
url = (
'https://gameclipsmetadata.xboxlive.com/users'
'/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % {
'xuid': xuid,
'scid': scid,
'clip_id': clip_id,
}
)
resp = xbox.client._get(url)
# scid does not seem to matter when fetching clips,
# as long as it looks like a uuid it should be fine.
# perhaps we'll raise an exception in future
if resp.status_code == 404:
msg = 'Could not find clip: xuid=%s, scid=%s, clip_id=%s' % (
xuid, scid, clip_id,
)
raise ClipNotFound(msg)
data = resp.json()
# as we don't have the user object let's
# create a lazily evaluated proxy object
# that will fetch it only when required
user = UserProxy(xuid)
return cls(user, data['gameClip'])
@classmethod
@authenticates
@classmethod
@authenticates
def latest_from_user(cls, user, include_pending=False):
'''
Gets all clips, saved and unsaved
:param user: :class:`~xbox.GamerProfile` instance
:param bool include_pending: whether to ignore clips that are not
yet uploaded. These clips will have thumbnails and media_url
set to ``None``
:returns: Iterator of :class:`~xbox.Clip` instances
'''
url = 'https://gameclipsmetadata.xboxlive.com/users/xuid(%s)/clips'
resp = xbox.client._get(url % user.xuid)
data = resp.json()
for clip in data['gameClips']:
if clip['state'] != 'PendingUpload' or include_pending:
yield cls(user, clip)
|
joealcorn/xbox | xbox/vendor/requests/packages/urllib3/util/retry.py | Retry.increment | python | def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None):
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
else:
# FIXME: Nothing changed, scenario doesn't make sense.
_observed_errors += 1
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error)
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry | Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/util/retry.py#L210-L269 | [
"def reraise(tp, value, tb=None):\n if value.__traceback__ is not tb:\n raise value.with_traceback(tb)\n raise value\n",
"def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect, read=self.read, redirect=self.redirect,\n method_whitelist=self.method_whitelist,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n _observed_errors=self._observed_errors,\n )\n params.update(kw)\n return type(self)(**params)\n",
"def _is_connection_error(self, err):\n \"\"\" Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n return isinstance(err, ConnectTimeoutError)\n",
"def is_exhausted(self):\n \"\"\" Are we out of retries?\n \"\"\"\n retry_counts = (self.total, self.connect, self.read, self.redirect)\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n"
] | class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
indempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.MAX_BACKOFF`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we can't
assume that the server did not process any of it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/response retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries?
"""
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
|
joealcorn/xbox | xbox/vendor/requests/packages/urllib3/response.py | HTTPResponse.read | python | def read(self, amt=None, decode_content=None, cache_content=False):
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if not 'read operation timed out' in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn() | Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.) | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/response.py#L143-L237 | [
"def _get_decoder(mode):\n if mode == 'gzip':\n return zlib.decompressobj(16 + zlib.MAX_WBITS)\n\n return DeflateDecoder()\n"
] | class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = HTTPHeaderDict()
if headers:
self.headers.update(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwords-compat with earlier urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw)
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
|
joealcorn/xbox | xbox/vendor/requests/packages/urllib3/response.py | HTTPResponse.from_httplib | python | def from_httplib(ResponseCls, r, **response_kw):
headers = HTTPHeaderDict()
for k, v in r.getheaders():
headers.add(k, v)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, 'strict', 0)
return ResponseCls(body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw) | Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/response.py#L262-L284 | null | class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, attempts to decode specific content-encoding's based on headers
(like 'gzip' and 'deflate') will be skipped and raw data will be used
instead.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
"""
CONTENT_DECODERS = ['gzip', 'deflate']
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(self, body='', headers=None, status=0, version=0, reason=None,
strict=0, preload_content=True, decode_content=True,
original_response=None, pool=None, connection=None):
self.headers = HTTPHeaderDict()
if headers:
self.headers.update(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
if body and isinstance(body, (basestring, binary_type)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, 'read'):
self._fp = body
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get('location')
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
@property
def data(self):
# For backwords-compat with earlier urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
def tell(self):
"""
Obtain the number of bytes pulled over the wire so far. May differ from
the amount of content returned by :meth:``HTTPResponse.read`` if bytes
are encoded on the wire (e.g, compressed).
"""
return self._fp_bytes_read
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
returned despite of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get('content-encoding', '').lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
try:
try:
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read()
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt)
if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if not 'read operation timed out' in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, 'Read timed out.')
except HTTPException as e:
# This includes IncompleteRead.
raise ProtocolError('Connection broken: %r' % e, e)
self._fp_bytes_read += len(data)
try:
if decode_content and self._decoder:
data = self._decoder.decompress(data)
except (IOError, zlib.error) as e:
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding, e)
if flush_decoder and decode_content and self._decoder:
buf = self._decoder.decompress(binary_type())
data += buf + self._decoder.flush()
if cache_content:
self._body = data
return data
finally:
if self._original_response and self._original_response.isclosed():
self.release_conn()
def stream(self, amt=2**16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
How much of the content to read. The generator will return up to
much data per iteration, but may return less. This is particularly
likely when using compressed data. However, the empty string will
never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
@property
def closed(self):
if self._fp is None:
return True
elif hasattr(self._fp, 'closed'):
return self._fp.closed
elif hasattr(self._fp, 'isclosed'): # Python 2
return self._fp.isclosed()
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError("The file-like object this HTTPResponse is wrapped "
"around has no file descriptor")
def flush(self):
if self._fp is not None and hasattr(self._fp, 'flush'):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[:len(temp)] = temp
return len(temp)
|
joealcorn/xbox | xbox/vendor/requests/packages/urllib3/_collections.py | HTTPHeaderDict.add | python | def add(self, key, value):
self._data.setdefault(key.lower(), []).append((key, value)) | Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz' | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/packages/urllib3/_collections.py#L151-L160 | null | class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
If you want to access the raw headers with their original casing
for debugging purposes you can access the private ``._data`` attribute
which is a normal python ``dict`` that maps the case-insensitive key to a
list of tuples stored as (case-sensitive-original-name, value). Using the
structure from above as our example:
>>> headers._data
{'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')],
'content-length': [('content-length', '7')]}
"""
def __init__(self, headers=None, **kwargs):
self._data = {}
if headers is None:
headers = {}
self.update(headers, **kwargs)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
return self[key].split(', ') if key in self else []
def copy(self):
h = HTTPHeaderDict()
for key in self._data:
for rawkey, value in self._data[key]:
h.add(rawkey, value)
return h
def __eq__(self, other):
if not isinstance(other, Mapping):
return False
other = HTTPHeaderDict(other)
return dict((k1, self[k1]) for k1 in self._data) == \
dict((k2, other[k2]) for k2 in other._data)
def __getitem__(self, key):
values = self._data[key.lower()]
return ', '.join(value[1] for value in values)
def __setitem__(self, key, value):
self._data[key.lower()] = [(key, value)]
def __delitem__(self, key):
del self._data[key.lower()]
def __len__(self):
return len(self._data)
def __iter__(self):
for headers in itervalues(self._data):
yield headers[0][0]
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
|
joealcorn/xbox | xbox/vendor/requests/auth.py | HTTPDigestAuth.handle_401 | python | def handle_401(self, r, **kwargs):
if self.pos is not None:
# Rewind the file position indicator of the body to where
# it was to resend the request.
r.request.body.seek(self.pos)
num_401_calls = getattr(self, 'num_401_calls', 1)
s_auth = r.headers.get('www-authenticate', '')
if 'digest' in s_auth.lower() and num_401_calls < 2:
setattr(self, 'num_401_calls', num_401_calls + 1)
pat = re.compile(r'digest ', flags=re.IGNORECASE)
self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
# Consume content and release the original connection
# to allow our new request to reuse the same one.
r.content
r.raw.release_conn()
prep = r.request.copy()
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
prep.prepare_cookies(prep._cookies)
prep.headers['Authorization'] = self.build_digest_header(
prep.method, prep.url)
_r = r.connection.send(prep, **kwargs)
_r.history.append(r)
_r.request = prep
return _r
setattr(self, 'num_401_calls', 1)
return r | Takes the given response and tries digest-auth, if needed. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/auth.py#L153-L186 | [
"def extract_cookies_to_jar(jar, request, response):\n \"\"\"Extract the cookies from the response into a CookieJar.\n\n :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)\n :param request: our own requests.Request object\n :param response: urllib3.HTTPResponse object\n \"\"\"\n if not (hasattr(response, '_original_response') and\n response._original_response):\n return\n # the _original_response field is the wrapped httplib.HTTPResponse object,\n req = MockRequest(request)\n # pull out the HTTPMessage with the headers and put it in the mock:\n res = MockResponse(response._original_response.msg)\n jar.extract_cookies(res, req)\n",
"def parse_dict_header(value):\n \"\"\"Parse lists of key, value pairs as described by RFC 2068 Section 2 and\n convert them into a python dict:\n\n >>> d = parse_dict_header('foo=\"is a fish\", bar=\"as well\"')\n >>> type(d) is dict\n True\n >>> sorted(d.items())\n [('bar', 'as well'), ('foo', 'is a fish')]\n\n If there is no value for a key it will be `None`:\n\n >>> parse_dict_header('key_without_value')\n {'key_without_value': None}\n\n To create a header from the :class:`dict` again, use the\n :func:`dump_header` function.\n\n :param value: a string with a dict header.\n :return: :class:`dict`\n \"\"\"\n result = {}\n for item in _parse_list_header(value):\n if '=' not in item:\n result[item] = None\n continue\n name, value = item.split('=', 1)\n if value[:1] == value[-1:] == '\"':\n value = unquote_header_value(value[1:-1])\n result[name] = value\n return result\n",
"def build_digest_header(self, method, url):\n\n realm = self.chal['realm']\n nonce = self.chal['nonce']\n qop = self.chal.get('qop')\n algorithm = self.chal.get('algorithm')\n opaque = self.chal.get('opaque')\n\n if algorithm is None:\n _algorithm = 'MD5'\n else:\n _algorithm = algorithm.upper()\n # lambdas assume digest modules are imported at the top level\n if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':\n def md5_utf8(x):\n if isinstance(x, str):\n x = x.encode('utf-8')\n return hashlib.md5(x).hexdigest()\n hash_utf8 = md5_utf8\n elif _algorithm == 'SHA':\n def sha_utf8(x):\n if isinstance(x, str):\n x = x.encode('utf-8')\n return hashlib.sha1(x).hexdigest()\n hash_utf8 = sha_utf8\n\n KD = lambda s, d: hash_utf8(\"%s:%s\" % (s, d))\n\n if hash_utf8 is None:\n return None\n\n # XXX not implemented yet\n entdig = None\n p_parsed = urlparse(url)\n path = p_parsed.path\n if p_parsed.query:\n path += '?' + p_parsed.query\n\n A1 = '%s:%s:%s' % (self.username, realm, self.password)\n A2 = '%s:%s' % (method, path)\n\n HA1 = hash_utf8(A1)\n HA2 = hash_utf8(A2)\n\n if nonce == self.last_nonce:\n self.nonce_count += 1\n else:\n self.nonce_count = 1\n ncvalue = '%08x' % self.nonce_count\n s = str(self.nonce_count).encode('utf-8')\n s += nonce.encode('utf-8')\n s += time.ctime().encode('utf-8')\n s += os.urandom(8)\n\n cnonce = (hashlib.sha1(s).hexdigest()[:16])\n noncebit = \"%s:%s:%s:%s:%s\" % (nonce, ncvalue, cnonce, qop, HA2)\n if _algorithm == 'MD5-SESS':\n HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))\n\n if qop is None:\n respdig = KD(HA1, \"%s:%s\" % (nonce, HA2))\n elif qop == 'auth' or 'auth' in qop.split(','):\n respdig = KD(HA1, noncebit)\n else:\n # XXX handle auth-int.\n return None\n\n self.last_nonce = nonce\n\n # XXX should the partial digests be encoded too?\n base = 'username=\"%s\", realm=\"%s\", nonce=\"%s\", uri=\"%s\", ' \\\n 'response=\"%s\"' % (self.username, realm, nonce, path, respdig)\n if opaque:\n base += ', opaque=\"%s\"' % opaque\n 
if algorithm:\n base += ', algorithm=\"%s\"' % algorithm\n if entdig:\n base += ', digest=\"%s\"' % entdig\n if qop:\n base += ', qop=\"auth\", nc=%s, cnonce=\"%s\"' % (ncvalue, cnonce)\n\n return 'Digest %s' % (base)\n"
] | class HTTPDigestAuth(AuthBase):
"""Attaches HTTP Digest Authentication to the given Request object."""
def __init__(self, username, password):
self.username = username
self.password = password
self.last_nonce = ''
self.nonce_count = 0
self.chal = {}
self.pos = None
def build_digest_header(self, method, url):
realm = self.chal['realm']
nonce = self.chal['nonce']
qop = self.chal.get('qop')
algorithm = self.chal.get('algorithm')
opaque = self.chal.get('opaque')
if algorithm is None:
_algorithm = 'MD5'
else:
_algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
def md5_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.md5(x).hexdigest()
hash_utf8 = md5_utf8
elif _algorithm == 'SHA':
def sha_utf8(x):
if isinstance(x, str):
x = x.encode('utf-8')
return hashlib.sha1(x).hexdigest()
hash_utf8 = sha_utf8
KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
if hash_utf8 is None:
return None
# XXX not implemented yet
entdig = None
p_parsed = urlparse(url)
path = p_parsed.path
if p_parsed.query:
path += '?' + p_parsed.query
A1 = '%s:%s:%s' % (self.username, realm, self.password)
A2 = '%s:%s' % (method, path)
HA1 = hash_utf8(A1)
HA2 = hash_utf8(A2)
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
ncvalue = '%08x' % self.nonce_count
s = str(self.nonce_count).encode('utf-8')
s += nonce.encode('utf-8')
s += time.ctime().encode('utf-8')
s += os.urandom(8)
cnonce = (hashlib.sha1(s).hexdigest()[:16])
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
if _algorithm == 'MD5-SESS':
HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
if qop is None:
respdig = KD(HA1, "%s:%s" % (nonce, HA2))
elif qop == 'auth' or 'auth' in qop.split(','):
respdig = KD(HA1, noncebit)
else:
# XXX handle auth-int.
return None
self.last_nonce = nonce
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (self.username, realm, nonce, path, respdig)
if opaque:
base += ', opaque="%s"' % opaque
if algorithm:
base += ', algorithm="%s"' % algorithm
if entdig:
base += ', digest="%s"' % entdig
if qop:
base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return 'Digest %s' % (base)
def __call__(self, r):
# If we have a saved nonce, skip the 401
if self.last_nonce:
r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
try:
self.pos = r.body.tell()
except AttributeError:
pass
r.register_hook('response', self.handle_401)
return r
|
joealcorn/xbox | xbox/vendor/requests/models.py | Request.prepare | python | def prepare(self):
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p | Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/models.py#L240-L254 | [
"def prepare(self, method=None, url=None, headers=None, files=None,\n data=None, params=None, auth=None, cookies=None, hooks=None):\n \"\"\"Prepares the entire request with the given parameters.\"\"\"\n\n self.prepare_method(method)\n self.prepare_url(url, params)\n self.prepare_headers(headers)\n self.prepare_cookies(cookies)\n self.prepare_body(data, files)\n self.prepare_auth(auth, url)\n # Note that prepare_auth must be last to enable authentication schemes\n # such as OAuth to work on a fully prepared request.\n\n # This MUST go after prepare_auth. Authenticators could add a hook\n self.prepare_hooks(hooks)\n"
] | class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
|
joealcorn/xbox | xbox/vendor/requests/models.py | PreparedRequest.prepare_url | python | def prepare_url(self, url, params):
url = to_native_string(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url | Prepares the given HTTP URL. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/models.py#L326-L385 | [
"def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n\n # While this code has overlap with stdlib's urlparse, it is much\n # simplified for our needs and less annoying.\n # Additionally, this implementations does silly things to be optimal\n # on CPython.\n\n if not url:\n # Empty\n return Url()\n\n scheme = None\n auth = None\n host = None\n port = None\n path = None\n fragment = None\n query = None\n\n # Scheme\n if '://' in url:\n scheme, url = url.split('://', 1)\n\n # Find the earliest Authority Terminator\n # (http://tools.ietf.org/html/rfc3986#section-3.2)\n url, path_, delim = split_first(url, ['/', '?', '#'])\n\n if delim:\n # Reassemble the path\n path = delim + path_\n\n # Auth\n if '@' in url:\n # Last '@' denotes end of auth part\n auth, url = url.rsplit('@', 1)\n\n # IPv6\n if url and url[0] == '[':\n host, url = url.split(']', 1)\n host += ']'\n\n # Port\n if ':' in url:\n _host, port = url.split(':', 1)\n\n if not host:\n host = _host\n\n if port:\n # If given, ports must be integers.\n if not port.isdigit():\n raise LocationParseError(url)\n port = int(port)\n else:\n # Blank ports are cool, too. (rfc3986#section-3.2.3)\n port = None\n\n elif not host and url:\n host = url\n\n if not path:\n return Url(scheme, auth, host, port, path, query, fragment)\n\n # Fragment\n if '#' in path:\n path, fragment = path.split('#', 1)\n\n # Query\n if '?' in path:\n path, query = path.split('?', 1)\n\n return Url(scheme, auth, host, port, path, query, fragment)\n",
"def to_native_string(string, encoding='ascii'):\n \"\"\"\n Given a string object, regardless of type, returns a representation of that\n string in the native string type, encoding and decoding where necessary.\n This assumes ASCII unless told otherwise.\n \"\"\"\n out = None\n\n if isinstance(string, builtin_str):\n out = string\n else:\n if is_py2:\n out = string.encode(encoding)\n else:\n out = string.decode(encoding)\n\n return out\n",
"def requote_uri(uri):\n \"\"\"Re-quote the given URI.\n\n This function passes the given URI through an unquote/quote cycle to\n ensure that it is fully and consistently quoted.\n \"\"\"\n # Unquote only the unreserved characters\n # Then quote only illegal characters (do not quote reserved, unreserved,\n # or '%')\n return quote(unquote_unreserved(uri), safe=\"!#$%&'()*+,/:;=?@[]~\")\n",
"def _encode_params(data):\n \"\"\"Encode parameters in a piece of data.\n\n Will successfully encode parameters when passed as a dict or a list of\n 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary\n if parameters are supplied as a dict.\n \"\"\"\n\n if isinstance(data, (str, bytes)):\n return data\n elif hasattr(data, 'read'):\n return data\n elif hasattr(data, '__iter__'):\n result = []\n for k, vs in to_key_val_list(data):\n if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):\n vs = [vs]\n for v in vs:\n if v is not None:\n result.append(\n (k.encode('utf-8') if isinstance(k, str) else k,\n v.encode('utf-8') if isinstance(v, str) else v))\n return urlencode(result, doseq=True)\n else:\n return data\n"
] | class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
    def prepare_body(self, data, files):
        """Prepares the given HTTP body data.

        Depending on *data* and *files* the body is either streamed
        (chunked, or with an explicit Content-Length when measurable),
        encoded as multipart/form-data, or urlencoded as form parameters.
        """
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None
        length = None

        # A "stream" is any iterable that is not a plain string, sequence
        # or mapping -- e.g. a generator or file-like object.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, dict))
        ])

        try:
            length = super_len(data)
        except (TypeError, AttributeError, UnsupportedOperation):
            # Length cannot be determined (e.g. generators); chunked
            # transfer encoding is used below in that case.
            length = None

        if is_stream:
            body = data

            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')

            if length is not None:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        # Pre-encoded or file-like data: leave any
                        # caller-supplied Content-Type untouched.
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if (content_type) and (not 'content-type' in self.headers):
                self.headers['Content-Type'] = content_type

        self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data.

        Falls back to credentials embedded in the URL when *auth* is not
        supplied.  NOTE(review): the authenticator returns a (possibly
        mutated) request whose attributes are copied back wholesale via
        ``__dict__`` -- confirm custom authenticators tolerate this.
        """
        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length, since auth may have altered the body.
            self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
|
joealcorn/xbox | xbox/vendor/requests/models.py | PreparedRequest.prepare_body | python | def prepare_body(self, data, files):
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body | Prepares the given HTTP body data. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/models.py#L395-L444 | [
"def super_len(o):\n if hasattr(o, '__len__'):\n return len(o)\n\n if hasattr(o, 'len'):\n return o.len\n\n if hasattr(o, 'fileno'):\n try:\n fileno = o.fileno()\n except io.UnsupportedOperation:\n pass\n else:\n return os.fstat(fileno).st_size\n\n if hasattr(o, 'getvalue'):\n # e.g. BytesIO, cStringIO.StringIO\n return len(o.getvalue())\n",
"def _encode_params(data):\n \"\"\"Encode parameters in a piece of data.\n\n Will successfully encode parameters when passed as a dict or a list of\n 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary\n if parameters are supplied as a dict.\n \"\"\"\n\n if isinstance(data, (str, bytes)):\n return data\n elif hasattr(data, 'read'):\n return data\n elif hasattr(data, '__iter__'):\n result = []\n for k, vs in to_key_val_list(data):\n if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):\n vs = [vs]\n for v in vs:\n if v is not None:\n result.append(\n (k.encode('utf-8') if isinstance(k, str) else k,\n v.encode('utf-8') if isinstance(v, str) else v))\n return urlencode(result, doseq=True)\n else:\n return data\n",
"def _encode_files(files, data):\n \"\"\"Build the body for a multipart/form-data request.\n\n Will successfully encode files when passed as a dict or a list of\n 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary\n if parameters are supplied as a dict.\n\n \"\"\"\n if (not files):\n raise ValueError(\"Files must be provided.\")\n elif isinstance(data, basestring):\n raise ValueError(\"Data must not be a string.\")\n\n new_fields = []\n fields = to_key_val_list(data or {})\n files = to_key_val_list(files or {})\n\n for field, val in fields:\n if isinstance(val, basestring) or not hasattr(val, '__iter__'):\n val = [val]\n for v in val:\n if v is not None:\n # Don't call str() on bytestrings: in Py3 it all goes wrong.\n if not isinstance(v, bytes):\n v = str(v)\n\n new_fields.append(\n (field.decode('utf-8') if isinstance(field, bytes) else field,\n v.encode('utf-8') if isinstance(v, str) else v))\n\n for (k, v) in files:\n # support for explicit filename\n ft = None\n fh = None\n if isinstance(v, (tuple, list)):\n if len(v) == 2:\n fn, fp = v\n elif len(v) == 3:\n fn, fp, ft = v\n else:\n fn, fp, ft, fh = v\n else:\n fn = guess_filename(v) or k\n fp = v\n if isinstance(fp, str):\n fp = StringIO(fp)\n if isinstance(fp, bytes):\n fp = BytesIO(fp)\n\n rf = RequestField(name=k, data=fp.read(),\n filename=fn, headers=fh)\n rf.make_multipart(content_type=ft)\n new_fields.append(rf)\n\n body, content_type = encode_multipart_formdata(new_fields)\n\n return body, content_type\n",
"def prepare_content_length(self, body):\n if hasattr(body, 'seek') and hasattr(body, 'tell'):\n body.seek(0, 2)\n self.headers['Content-Length'] = builtin_str(body.tell())\n body.seek(0, 0)\n elif body is not None:\n l = super_len(body)\n if l:\n self.headers['Content-Length'] = builtin_str(l)\n elif self.method not in ('GET', 'HEAD'):\n self.headers['Content-Length'] = '0'\n"
] | class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
url = to_native_string(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
|
joealcorn/xbox | xbox/vendor/requests/adapters.py | HTTPAdapter.request_url | python | def request_url(self, request, proxies):
proxies = proxies or {}
scheme = urlparse(request.url).scheme
proxy = proxies.get(scheme)
if proxy and scheme != 'https':
url, _ = urldefrag(request.url)
else:
url = request.path_url
return url | Obtain the url to use when making the final request.
If the message is being sent through a HTTP proxy, the full URL has to
be used. Otherwise, we should only use the path portion of the URL.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param proxies: A dictionary of schemes to proxy URLs. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/adapters.py#L255-L277 | null | class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed connections and
timeouts, never to requests where the server returns a response.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
def __init__(self, pool_connections=DEFAULT_POOLSIZE,
pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
pool_block=DEFAULT_POOLBLOCK):
self.max_retries = max_retries
self.config = {}
self.proxy_manager = {}
super(HTTPAdapter, self).__init__()
self._pool_connections = pool_connections
self._pool_maxsize = pool_maxsize
self._pool_block = pool_block
self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# because self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
"""Initializes a urllib3 PoolManager.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param connections: The number of urllib3 connection pools to cache.
:param maxsize: The maximum number of connections to save in the pool.
:param block: Block when no free connections are available.
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
"""
# save these values for pickling
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
block=block, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
def close(self):
"""Disposes of any internal state.
Currently, this just closes the PoolManager, which closes pooled
connections.
"""
self.poolmanager.clear()
def add_headers(self, request, **kwargs):
"""Add any headers needed by the connection. As of v2.0 this does
nothing by default, but is left for overriding by users that subclass
the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
:param kwargs: The keyword arguments from the call to send().
"""
pass
def proxy_headers(self, proxy):
"""Returns a dictionary of the headers to add to any request sent
through a proxy. This works with urllib3 magic to ensure that they are
correctly sent to the proxy, rather than in a tunnelled request if
CONNECT is being used.
This should not be called from user code, and is only exposed for use
when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxies: The url of the proxy being used for this request.
:param kwargs: Optional additional keyword arguments.
"""
headers = {}
username, password = get_auth_from_url(proxy)
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username,
password)
return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
"""Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request.
"""
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=Retry(self.max_retries, read=False),
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp)
|
joealcorn/xbox | xbox/vendor/requests/adapters.py | HTTPAdapter.send | python | def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
conn = self.get_connection(request.url, proxies)
self.cert_verify(conn, request.url, verify, cert)
url = self.request_url(request, proxies)
self.add_headers(request)
chunked = not (request.body is None or 'Content-Length' in request.headers)
if isinstance(timeout, tuple):
try:
connect, read = timeout
timeout = TimeoutSauce(connect=connect, read=read)
except ValueError as e:
# this may raise a string formatting error.
err = ("Invalid timeout {0}. Pass a (connect, read) "
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
else:
timeout = TimeoutSauce(connect=timeout, read=timeout)
try:
if not chunked:
resp = conn.urlopen(
method=request.method,
url=url,
body=request.body,
headers=request.headers,
redirect=False,
assert_same_host=False,
preload_content=False,
decode_content=False,
retries=Retry(self.max_retries, read=False),
timeout=timeout
)
# Send the request.
else:
if hasattr(conn, 'proxy_pool'):
conn = conn.proxy_pool
low_conn = conn._get_conn(timeout=timeout)
try:
low_conn.putrequest(request.method,
url,
skip_accept_encoding=True)
for header, value in request.headers.items():
low_conn.putheader(header, value)
low_conn.endheaders()
for i in request.body:
low_conn.send(hex(len(i))[2:].encode('utf-8'))
low_conn.send(b'\r\n')
low_conn.send(i)
low_conn.send(b'\r\n')
low_conn.send(b'0\r\n\r\n')
r = low_conn.getresponse()
resp = HTTPResponse.from_httplib(
r,
pool=conn,
connection=low_conn,
preload_content=False,
decode_content=False
)
except:
# If we hit any problems here, clean up the connection.
# Then, reraise so that we can handle the actual exception.
low_conn.close()
raise
else:
# All is well, return the connection to the pool.
conn._put_conn(low_conn)
except (ProtocolError, socket.error) as err:
raise ConnectionError(err, request=request)
except MaxRetryError as e:
if isinstance(e.reason, ConnectTimeoutError):
raise ConnectTimeout(e, request=request)
raise ConnectionError(e, request=request)
except _ProxyError as e:
raise ProxyError(e)
except (_SSLError, _HTTPError) as e:
if isinstance(e, _SSLError):
raise SSLError(e, request=request)
elif isinstance(e, ReadTimeoutError):
raise ReadTimeout(e, request=request)
else:
raise
return self.build_response(request, resp) | Sends PreparedRequest object. Returns Response object.
:param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
:param stream: (optional) Whether to stream the request content.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a (`connect timeout, read
timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param verify: (optional) Whether to verify SSL certificates.
:param cert: (optional) Any user-provided SSL certificate to be trusted.
:param proxies: (optional) The proxies dictionary to apply to the request. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/adapters.py#L315-L426 | [
"def cert_verify(self, conn, url, verify, cert):\n \"\"\"Verify a SSL certificate. This method should not be called from user\n code, and is only exposed for use when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param conn: The urllib3 connection object associated with the cert.\n :param url: The requested URL.\n :param verify: Whether we should actually verify the certificate.\n :param cert: The SSL certificate to verify.\n \"\"\"\n if url.lower().startswith('https') and verify:\n\n cert_loc = None\n\n # Allow self-specified cert location.\n if verify is not True:\n cert_loc = verify\n\n if not cert_loc:\n cert_loc = DEFAULT_CA_BUNDLE_PATH\n\n if not cert_loc:\n raise Exception(\"Could not find a suitable SSL CA certificate bundle.\")\n\n conn.cert_reqs = 'CERT_REQUIRED'\n conn.ca_certs = cert_loc\n else:\n conn.cert_reqs = 'CERT_NONE'\n conn.ca_certs = None\n\n if cert:\n if not isinstance(cert, basestring):\n conn.cert_file = cert[0]\n conn.key_file = cert[1]\n else:\n conn.cert_file = cert\n",
"def get_connection(self, url, proxies=None):\n \"\"\"Returns a urllib3 connection for the given URL. This should not be\n called from user code, and is only exposed for use when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param url: The URL to connect to.\n :param proxies: (optional) A Requests-style dictionary of proxies used on this request.\n \"\"\"\n proxies = proxies or {}\n proxy = proxies.get(urlparse(url.lower()).scheme)\n\n if proxy:\n proxy = prepend_scheme_if_needed(proxy, 'http')\n proxy_manager = self.proxy_manager_for(proxy)\n conn = proxy_manager.connection_from_url(url)\n else:\n # Only scheme should be lower case\n parsed = urlparse(url)\n url = parsed.geturl()\n conn = self.poolmanager.connection_from_url(url)\n\n return conn\n",
"def request_url(self, request, proxies):\n \"\"\"Obtain the url to use when making the final request.\n\n If the message is being sent through a HTTP proxy, the full URL has to\n be used. Otherwise, we should only use the path portion of the URL.\n\n This should not be called from user code, and is only exposed for use\n when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.\n :param proxies: A dictionary of schemes to proxy URLs.\n \"\"\"\n proxies = proxies or {}\n scheme = urlparse(request.url).scheme\n proxy = proxies.get(scheme)\n\n if proxy and scheme != 'https':\n url, _ = urldefrag(request.url)\n else:\n url = request.path_url\n\n return url\n",
"def add_headers(self, request, **kwargs):\n \"\"\"Add any headers needed by the connection. As of v2.0 this does\n nothing by default, but is left for overriding by users that subclass\n the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n This should not be called from user code, and is only exposed for use\n when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.\n :param kwargs: The keyword arguments from the call to send().\n \"\"\"\n pass\n"
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed connections and
        timeouts, never to requests where the server returns a response.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """

    # Only these attributes survive pickling; see __getstate__/__setstate__.
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']

    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        self.max_retries = max_retries
        self.config = {}
        # One urllib3 ProxyManager per proxy URL, built lazily by
        # proxy_manager_for() so unused proxies cost nothing.
        self.proxy_manager = {}

        super(HTTPAdapter, self).__init__()

        # Kept as attributes so the pool can be rebuilt after unpickling.
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block

        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)

    def __getstate__(self):
        # Pickle only the plain-data attributes named in __attrs__.
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)

    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # because self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}

        for attr, value in state.items():
            setattr(self, attr, value)

        # Recreate the (unpicklable) pool manager from the restored settings.
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block

        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, **pool_kwargs)

    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        """
        # Cache one manager per proxy URL; headers (e.g. Proxy-Authorization)
        # are computed once at creation time.
        if not proxy in self.proxy_manager:
            proxy_headers = self.proxy_headers(proxy)
            self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)

        return self.proxy_manager[proxy]

    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:

            cert_loc = None

            # Allow self-specified cert location.
            # ``verify`` may be a bool or a path to a CA bundle.
            if verify is not True:
                cert_loc = verify

            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH

            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")

            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            # verify=False (or plain http): disable certificate validation.
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None

        if cert:
            # ``cert`` may be a (cert_file, key_file) pair or a single path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert

    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()

        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)

        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))

        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason

        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url

        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)

        # Give the Response some context.
        response.request = req
        response.connection = self

        return response

    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # Proxies are keyed by URL scheme ('http', 'https', ...).
        proxy = proxies.get(urlparse(url.lower()).scheme)

        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)

        return conn

    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()

    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        proxy = proxies.get(scheme)

        if proxy and scheme != 'https':
            # Plain-HTTP proxying needs the absolute URL (minus fragment).
            url, _ = urldefrag(request.url)
        else:
            url = request.path_url

        return url

    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass

    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        :param kwargs: Optional additional keyword arguments.
        """
        headers = {}
        # Credentials embedded in the proxy URL become a Basic auth header.
        username, password = get_auth_from_url(proxy)

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)

        return headers
|
def get_encodings_from_content(content):
    """Returns encodings from given content string.
    Scans for the HTML5 ``<meta charset>`` declaration, the HTML4
    http-equiv pragma, and the XML prolog encoding, in that order.
    :param content: bytestring to extract encodings from.
    """
    detectors = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in detectors:
        found.extend(pattern.findall(content))
    return found
:param content: bytestring to extract encodings from. | train | https://github.com/joealcorn/xbox/blob/3d2aeba10244dcb58d714d76fc88487c74bd1510/xbox/vendor/requests/utils.py#L285-L297 | null | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update."""
    # Mapping-like objects are flattened to their item pairs; anything else
    # is assumed to already be a sequence of pairs and passes through.
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.
    Tries, in order: ``len()``, a ``len`` attribute, the size of the backing
    file descriptor, and the length of an in-memory buffer (``getvalue``).
    Returns ``None`` when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            descriptor = o.fileno()
        except io.UnsupportedOperation:
            # In-memory file-likes (BytesIO/StringIO) land here.
            pass
        else:
            return os.fstat(descriptor).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())

    return None
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc."""
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        # Use the first of ~/.netrc or ~/_netrc (Windows spelling) that exists.
        for f in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(f))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc
        host = ri.netloc.split(':')[0]

        try:
            _netrc = netrc(netrc_path).authenticators(host)
            if _netrc:
                # Return with login / password
                # authenticators() yields (login, account, password); prefer
                # login, falling back to account when login is empty.
                login_i = (0 if _netrc[0] else 1)
                return (_netrc[login_i], _netrc[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the file,
            # we'll just skip netrc auth
            pass

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Return the basename of *obj*'s ``name`` attribute, when it looks like
    a real filesystem path (i.e. not a pseudo-name such as ``<stdin>``)."""
    candidate = getattr(obj, 'name', None)
    if not candidate:
        return None
    if candidate.startswith('<') or candidate.endswith('>'):
        return None
    return os.path.basename(candidate)
def from_key_val_list(value):
    """Coerce *value* into an :class:`OrderedDict` of key/value pairs.
    ``None`` passes through unchanged; scalar types are rejected because
    they cannot represent a mapping. E.g.::
        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,
    ::
        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.
    :raises ValueError: if *value* is a scalar that cannot hold pairs.
    """
    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    # ``collections.Mapping`` was an alias removed in Python 3.10; use the
    # ``collections.abc`` location when available, falling back for Python 2.
    try:
        mapping_type = collections.abc.Mapping
    except AttributeError:  # Python 2.x
        mapping_type = collections.Mapping

    if isinstance(value, mapping_type):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header (RFC 2068 Section 2) into a list.
    Elements may be quoted-strings (which may themselves contain commas);
    surrounding quotes are stripped after parsing. Order and duplicates are
    preserved::
        >>> parse_list_header('token, "quoted value"')
        ['token', 'quoted value']
    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in _parse_list_header(value):
        # Strip and unescape a surrounding quoted-string, if present.
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a ``key=value``-list header (RFC 2068 Section 2) into a dict.
    Keys without a value map to ``None``; quoted values are unquoted::
        >>> sorted(parse_dict_header('foo="is a fish", bar="as well"').items())
        [('bar', 'as well'), ('foo', 'is a fish')]
        >>> parse_dict_header('key_without_value')
        {'key_without_value': None}
    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    pairs = {}
    for element in _parse_list_header(value):
        if '=' not in element:
            # Bare token: present, but carries no value.
            pairs[element] = None
            continue
        name, val = element.split('=', 1)
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        pairs[name] = val
    return pairs
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquote a header value the way browsers actually do it (reversal of
    :func:`quote_header_value`), rather than strictly per RFC.
    :param value: the header value to unquote.
    :param is_filename: when True, a UNC path (``\\server\share``) is
        returned verbatim so IE-style filenames survive intact.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value
    # this is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well. IE for example is
    # uploading files with "C:\foo\bar.txt" as filename
    inner = value[1:-1]
    if is_filename and inner[:2] == '\\\\':
        # Leading '\\' marks a UNC path; unescaping would mangle it.
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Return a plain name -> value dict of the cookies in *cj*.
    :param cj: CookieJar object to extract cookies from.
    """
    # Later cookies with the same name overwrite earlier ones, matching
    # a manual assignment loop.
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Insert every key/value of *cookie_dict* into *cj* and return *cj*.
    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Build a throwaway jar from the dict, then merge it into the target.
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.
    :param headers: dictionary to extract encoding from.
    :returns: the ``charset`` parameter of the Content-Type header; falls
        back to ``'ISO-8859-1'`` for ``text/*`` types (per RFC 2616 3.7.1),
        else ``None``.
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    # Parse "type/subtype; key=value; ..." by hand: the ``cgi`` module that
    # previously did this was deprecated by PEP 594 and removed in 3.13.
    tokens = content_type.split(';')
    media_type = tokens[0].strip()
    params = {}
    for token in tokens[1:]:
        if '=' in token:
            key, _, param_value = token.partition('=')
            params[key.strip().lower()] = param_value.strip()

    if 'charset' in params:
        # Tolerate single- or double-quoted charset values.
        return params['charset'].strip("'\"")

    if 'text' in media_type:
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Incrementally decode byte chunks from *iterator* using ``r.encoding``.
    When the response has no known encoding, chunks pass through untouched.
    Undecodable bytes become replacement characters.
    """
    if r.encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush anything buffered in the decoder (e.g. a trailing partial
    # multi-byte sequence).
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Yield successive chunks of *string*, each at most *slice_length* long."""
    offset = 0
    while offset < len(string):
        yield string[offset:offset + slice_length]
        offset += slice_length
def get_unicode_from_response(r):
    """Return the body of *r* decoded to unicode.
    Strategy: decode strictly with the header-declared charset; on failure,
    decode again with replacement characters; if no charset is known at all,
    hand back the raw content unchanged.
    :param r: Response object to get unicode content from.
    """
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            # Strict decode failed; fall through to the lenient decode below.
            pass

    try:
        # ``encoding`` may be None here, in which case str() raises TypeError.
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Decode only the percent-escapes that encode unreserved characters.
    Reserved, illegal and non-ASCII escapes are left exactly as they were.
    :raises InvalidURL: when a ``%`` is followed by a malformed hex pair.
    """
    segments = uri.split('%')
    for idx in range(1, len(segments)):
        hex_pair = segments[idx][0:2]
        if len(hex_pair) == 2 and hex_pair.isalnum():
            try:
                char = chr(int(hex_pair, 16))
            except ValueError:
                raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
            if char in UNRESERVED_SET:
                # Safe to decode: e.g. %7E -> ~
                segments[idx] = char + segments[idx][2:]
            else:
                segments[idx] = '%' + segments[idx]
        else:
            # Not a full hex pair; restore the '%' removed by split().
            segments[idx] = '%' + segments[idx]
    return ''.join(segments)
def requote_uri(uri):
    """Re-quote *uri* so it is fully and consistently percent-encoded.
    Runs an unquote/quote cycle: unreserved escapes are decoded first, then
    every character that must be escaped is re-escaped exactly once.
    """
    # Leave reserved, unreserved and '%' characters alone when re-quoting.
    safe_chars = "!#$%&'()*+,/:;=?@[]~"
    return quote(unquote_unreserved(uri), safe=safe_chars)
def address_in_network(ip, net):
    """Return True when *ip* falls inside the CIDR block *net*.
    Example: 192.168.1.1 is in 192.168.1.0/24, but not in 192.168.100.0/24.
    """
    ip_int = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_addr, prefix = net.split('/')
    mask_int = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    net_int = struct.unpack('=L', socket.inet_aton(net_addr))[0] & mask_int
    return (ip_int & mask_int) == (net_int & mask_int)
def dotted_netmask(mask):
    """Convert a prefix length (/xx) to dotted-quad netmask form.
    Example: 24 -> '255.255.255.0'.
    """
    # Top ``mask`` bits set, low (32 - mask) bits cleared.
    host_bits = 32 - mask
    netmask_value = (0xffffffff >> host_bits) << host_bits
    return socket.inet_ntoa(struct.pack('>I', netmask_value))
def is_ipv4_address(string_ip):
    """Return True when ``inet_aton`` can parse *string_ip* as IPv4."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple sanity check for '<ipv4>/<1-32>' CIDR notation
    (as found in a no_proxy variable)."""
    if string_network.count('/') != 1:
        return False
    address, _, prefix = string_network.partition('/')
    try:
        prefix_len = int(prefix)
    except ValueError:
        return False
    if not 1 <= prefix_len <= 32:
        return False
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.
    """
    # Environment variables may be spelled lower- or upper-case.
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        no_proxy = no_proxy.replace(' ', '').split(',')

        ip = netloc.split(':')[0]
        if is_ipv4_address(ip):
            # Literal IP host: honour CIDR entries in no_proxy.
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(ip, proxy_ip):
                        return True
        else:
            # Hostname: suffix-match each no_proxy entry.
            for host in no_proxy:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False

    if bypass:
        return True

    return False
def get_environ_proxies(url):
    """Return the environment's proxy configuration for *url*.
    An empty mapping means *url* is on the bypass list and must not be
    proxied.
    """
    return {} if should_bypass_proxies(url) else getproxies()
def default_user_agent(name="python-requests"):
    """Build the default User-Agent string, e.g.
    ``python-requests/<version> CPython/<py-version> <system>/<release>``.
    """
    _implementation = platform.python_implementation()

    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        version_info = sys.pypy_version_info
        _implementation_version = '%s.%s.%s' % (version_info.major,
                                                version_info.minor,
                                                version_info.micro)
        if version_info.releaselevel != 'final':
            # e.g. "2.4.0beta" for pre-release PyPy builds.
            _implementation_version = ''.join(
                [_implementation_version, version_info.releaselevel])
    elif _implementation == 'Jython':
        _implementation_version = platform.python_version()  # Complete Guess
    elif _implementation == 'IronPython':
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'

    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Return the case-insensitive dict of headers Requests sends by default."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Parse an RFC 5988 ``Link`` header into a list of dicts.
    e.g. '<http://x/a.jpg>; rel=front; type="image/jpeg"' becomes
    [{'url': 'http://x/a.jpg', 'rel': 'front', 'type': 'image/jpeg'}].
    """
    links = []
    strip_chars = " '\""

    for segment in value.split(","):
        try:
            url_part, params_part = segment.split(";", 1)
        except ValueError:
            # No parameters on this link.
            url_part, params_part = segment, ''

        link = {"url": url_part.strip("<> '\"")}
        for param in params_part.split(";"):
            try:
                key, val = param.split("=")
            except ValueError:
                # Malformed parameter: stop processing this link's params.
                break
            link[key.strip(strip_chars)] = val.strip(strip_chars)
        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF flavour of a JSON byte string.
    JSON always starts with two ASCII characters, so the encoding can be
    deduced from the position and count of NUL bytes in the first four
    bytes, or from an explicit BOM. Returns a codec name or ``None``.
    """
    sample = data[:4]
    # BUGFIX: this previously tested codecs.BOM32_BE, which is actually an
    # alias for the UTF-16 big-endian BOM, so a UTF-32-BE BOM was never
    # recognised here.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Ensure *url* carries a scheme, defaulting to *new_scheme*.
    An already-present scheme is never replaced."""
    parts = urlparse(url, new_scheme)
    netloc, path = parts.netloc, parts.path
    # urlparse is a finicky beast: for scheme-less input it sometimes reports
    # an empty netloc and shoves the host into the path component. Assume it
    # is being over-cautious and swap them back in that case.
    if not netloc:
        netloc, path = path, netloc
    return urlunparse((parts.scheme, netloc, path,
                       parts.params, parts.query, parts.fragment))
def get_auth_from_url(url):
    """Extract the percent-decoded (username, password) pair embedded in
    *url*, or ('', '') when the URL carries no credentials."""
    parsed = urlparse(url)
    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # username/password are None, or the parse result lacks them.
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """Coerce *string* to the interpreter's native ``str`` type, encoding or
    decoding with *encoding* (ASCII by default) when a conversion is needed.
    """
    if isinstance(string, builtin_str):
        return string
    # Native text is bytes on Python 2 and unicode on Python 3, so the
    # conversion direction depends on the interpreter.
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.