hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f733730f2dcf4904be19aad45f5b8b92056240ee | 253 | py | Python | youtubeviewer/colors.py | Kraphyl/YouTube-Viewer | 5fd46052984df5777fa57e140a8b37c1e226eb03 | [
"MIT"
] | null | null | null | youtubeviewer/colors.py | Kraphyl/YouTube-Viewer | 5fd46052984df5777fa57e140a8b37c1e226eb03 | [
"MIT"
] | null | null | null | youtubeviewer/colors.py | Kraphyl/YouTube-Viewer | 5fd46052984df5777fa57e140a8b37c1e226eb03 | [
"MIT"
] | null | null | null | import os
os.system("")
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
| 16.866667 | 25 | 0.521739 | import os
os.system("")
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
| true | true |
f73374d5193383680e6567226fe55556e50468a7 | 7,809 | py | Python | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | parsl/utils.py | aquanauts/parsl | 978bb483a4a41b3cef083aa242b2a78614a02dd0 | [
"Apache-2.0"
] | null | null | null | import inspect
import logging
import os
import shlex
import subprocess
import time
import typeguard
from contextlib import contextmanager
from typing import List
import parsl
from parsl.version import VERSION
logger = logging.getLogger(__name__)
@typeguard.typechecked
def get_version() -> str:
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
if os.path.exists(git_dir):
env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
try:
cmd = shlex.split('git rev-parse --short HEAD')
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
status = 'dirty' if diff else 'clean'
version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
except Exception:
pass
return version
@typeguard.typechecked
def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
"""Finds the checkpoints from all last runs.
Note that checkpoints are incremental, and this helper will not find
previous checkpoints from earlier than the most recent run. It probably
should be made to do so.
Kwargs:
- rundir(str) : Path to the runinfo directory
Returns:
- a list suitable for the checkpointFiles parameter of the DataFlowKernel
constructor
"""
if(not os.path.isdir(rundir)):
return []
dirs = sorted(os.listdir(rundir))
checkpoints = []
for runid in dirs:
checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
if os.path.isdir(checkpoint):
checkpoints.append(checkpoint)
return checkpoints
@typeguard.typechecked
def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
"""Finds the checkpoint from the last run, if one exists.
Note that checkpoints are incremental, and this helper will not find
previous checkpoints from earlier than the most recent run. It probably
should be made to do so.
Kwargs:
- rundir(str) : Path to the runinfo directory
Returns:
- a list suitable for the checkpointFiles parameter of the DataFlowKernel
constructor, with 0 or 1 elements
"""
if not os.path.isdir(rundir):
return []
dirs = sorted(os.listdir(rundir))
if len(dirs) == 0:
return []
last_runid = dirs[-1]
last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
if(not(os.path.isdir(last_checkpoint))):
return []
return [last_checkpoint]
def get_std_fname_mode(fdname, stdfspec):
import parsl.app.errors as pe
if stdfspec is None:
return None, None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = 'a+'
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
fname, mode = stdfspec
if not isinstance(fname, str) or not isinstance(mode, str):
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
else:
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
return fname, mode
@contextmanager
def wait_for_file(path, seconds=10):
for i in range(0, int(seconds * 100)):
time.sleep(seconds / 100.)
if os.path.exists(path):
break
yield
@contextmanager
def time_limited_open(path, mode, seconds=1):
with wait_for_file(path, seconds):
logger.debug("wait_for_file yielded")
f = open(path, mode)
yield f
f.close()
def wtime_to_minutes(time_string):
''' wtime_to_minutes
Convert standard wallclock time string to minutes.
Args:
- Time_string in HH:MM:SS format
Returns:
(int) minutes
'''
hours, mins, seconds = time_string.split(':')
total_mins = int(hours) * 60 + int(mins)
if total_mins < 1:
logger.warning("Time string '{}' parsed to {} minutes, less than 1".format(time_string, total_mins))
return total_mins
class RepresentationMixin(object):
"""A mixin class for adding a __repr__ method.
The __repr__ method will return a string equivalent to the code used to instantiate
the child class, with any defaults included explicitly. The __max_width__ class variable
controls the maximum width of the representation string. If this width is exceeded,
the representation string will be split up, with one argument or keyword argument per line.
Any arguments or keyword arguments in the constructor must be defined as attributes, or
an AttributeError will be raised.
Examples
--------
>>> from parsl.utils import RepresentationMixin
>>> class Foo(RepresentationMixin):
def __init__(self, first, second, third='three', fourth='fourth'):
self.first = first
self.second = second
self.third = third
self.fourth = fourth
>>> bar = Foo(1, 'two', fourth='baz')
>>> bar
Foo(1, 'two', third='three', fourth='baz')
"""
__max_width__ = 80
def __repr__(self):
init = self.__init__
# This test looks for a single layer of wrapping performed by
# functools.update_wrapper, commonly used in decorators. This will
# allow RepresentationMixin to see through a single such decorator
# applied to the __init__ method of a class, and find the underlying
# arguments. It will not see through multiple layers of such
# decorators, or cope with other decorators which do not use
# functools.update_wrapper.
if hasattr(init, '__wrapped__'):
init = init.__wrapped__
argspec = inspect.getfullargspec(init)
if len(argspec.args) > 1 and argspec.defaults is not None:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = 'class {} uses {} in the constructor, but does not define it as an attribute'
raise AttributeError(template.format(self.__class__.__name__, arg))
if len(defaults) != 0:
args = [getattr(self, a) for a in argspec.args[1:-len(defaults)]]
else:
args = [getattr(self, a) for a in argspec.args[1:]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = ["\n {}={}".format(k, indent(repr(v)))
for k, v in sorted(kwargs.items())]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ['{}={}'.format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
| 32.810924 | 145 | 0.631579 | import inspect
import logging
import os
import shlex
import subprocess
import time
import typeguard
from contextlib import contextmanager
from typing import List
import parsl
from parsl.version import VERSION
logger = logging.getLogger(__name__)
@typeguard.typechecked
def get_version() -> str:
version = parsl.__version__
work_tree = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(work_tree, '.git')
if os.path.exists(git_dir):
env = {'GIT_WORK_TREE': work_tree, 'GIT_DIR': git_dir}
try:
cmd = shlex.split('git rev-parse --short HEAD')
head = subprocess.check_output(cmd, env=env).strip().decode('utf-8')
diff = subprocess.check_output(shlex.split('git diff HEAD'), env=env)
status = 'dirty' if diff else 'clean'
version = '{v}-{head}-{status}'.format(v=VERSION, head=head, status=status)
except Exception:
pass
return version
@typeguard.typechecked
def get_all_checkpoints(rundir: str = "runinfo") -> List[str]:
if(not os.path.isdir(rundir)):
return []
dirs = sorted(os.listdir(rundir))
checkpoints = []
for runid in dirs:
checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, runid))
if os.path.isdir(checkpoint):
checkpoints.append(checkpoint)
return checkpoints
@typeguard.typechecked
def get_last_checkpoint(rundir: str = "runinfo") -> List[str]:
if not os.path.isdir(rundir):
return []
dirs = sorted(os.listdir(rundir))
if len(dirs) == 0:
return []
last_runid = dirs[-1]
last_checkpoint = os.path.abspath('{}/{}/checkpoint'.format(rundir, last_runid))
if(not(os.path.isdir(last_checkpoint))):
return []
return [last_checkpoint]
def get_std_fname_mode(fdname, stdfspec):
import parsl.app.errors as pe
if stdfspec is None:
return None, None
elif isinstance(stdfspec, str):
fname = stdfspec
mode = 'a+'
elif isinstance(stdfspec, tuple):
if len(stdfspec) != 2:
raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
fname, mode = stdfspec
if not isinstance(fname, str) or not isinstance(mode, str):
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
else:
raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
return fname, mode
@contextmanager
def wait_for_file(path, seconds=10):
for i in range(0, int(seconds * 100)):
time.sleep(seconds / 100.)
if os.path.exists(path):
break
yield
@contextmanager
def time_limited_open(path, mode, seconds=1):
with wait_for_file(path, seconds):
logger.debug("wait_for_file yielded")
f = open(path, mode)
yield f
f.close()
def wtime_to_minutes(time_string):
hours, mins, seconds = time_string.split(':')
total_mins = int(hours) * 60 + int(mins)
if total_mins < 1:
logger.warning("Time string '{}' parsed to {} minutes, less than 1".format(time_string, total_mins))
return total_mins
class RepresentationMixin(object):
__max_width__ = 80
def __repr__(self):
init = self.__init__
if hasattr(init, '__wrapped__'):
init = init.__wrapped__
argspec = inspect.getfullargspec(init)
if len(argspec.args) > 1 and argspec.defaults is not None:
defaults = dict(zip(reversed(argspec.args), reversed(argspec.defaults)))
else:
defaults = {}
for arg in argspec.args[1:]:
if not hasattr(self, arg):
template = 'class {} uses {} in the constructor, but does not define it as an attribute'
raise AttributeError(template.format(self.__class__.__name__, arg))
if len(defaults) != 0:
args = [getattr(self, a) for a in argspec.args[1:-len(defaults)]]
else:
args = [getattr(self, a) for a in argspec.args[1:]]
kwargs = {key: getattr(self, key) for key in defaults}
def assemble_multiline(args, kwargs):
def indent(text):
lines = text.splitlines()
if len(lines) <= 1:
return text
return "\n".join(" " + l for l in lines).strip()
args = ["\n {},".format(indent(repr(a))) for a in args]
kwargs = ["\n {}={}".format(k, indent(repr(v)))
for k, v in sorted(kwargs.items())]
info = "".join(args) + ", ".join(kwargs)
return self.__class__.__name__ + "({}\n)".format(info)
def assemble_line(args, kwargs):
kwargs = ['{}={}'.format(k, repr(v)) for k, v in sorted(kwargs.items())]
info = ", ".join([repr(a) for a in args] + kwargs)
return self.__class__.__name__ + "({})".format(info)
if len(assemble_line(args, kwargs)) <= self.__class__.__max_width__:
return assemble_line(args, kwargs)
else:
return assemble_multiline(args, kwargs)
| true | true |
f7337506208d7ee643aa3b5d1018c89ab589e49e | 12,741 | py | Python | ParamGenerator/Spearmint/spearmint/utils/compression.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 37 | 2018-03-20T21:23:11.000Z | 2022-03-26T08:19:20.000Z | ParamGenerator/Spearmint/spearmint/utils/compression.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 1 | 2021-06-29T10:03:22.000Z | 2021-06-29T10:03:22.000Z | ParamGenerator/Spearmint/spearmint/utils/compression.py | Tabor-Research-Group/ChemOS | 50117f572e95e68dc4dccb624cedb28dbfc6e419 | [
"Apache-2.0"
] | 10 | 2018-05-16T21:04:05.000Z | 2021-10-15T18:14:06.000Z | # -*- coding: utf-8 -*-
# Spearmint
#
# Academic and Non-Commercial Research Use Software License and Terms
# of Use
#
# Spearmint is a software package to perform Bayesian optimization
# according to specific algorithms (the “Software”). The Software is
# designed to automatically run experiments (thus the code name
# 'spearmint') in a manner that iteratively adjusts a number of
# parameters so as to minimize some objective in as few runs as
# possible.
#
# The Software was developed by Ryan P. Adams, Michael Gelbart, and
# Jasper Snoek at Harvard University, Kevin Swersky at the
# University of Toronto (“Toronto”), and Hugo Larochelle at the
# Université de Sherbrooke (“Sherbrooke”), which assigned its rights
# in the Software to Socpra Sciences et Génie
# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement
# between the parties, it is distributed for free academic and
# non-commercial research use by the President and Fellows of Harvard
# College (“Harvard”).
#
# Using the Software indicates your agreement to be bound by the terms
# of this Software Use Agreement (“Agreement”). Absent your agreement
# to the terms below, you (the “End User”) have no rights to hold or
# use the Software whatsoever.
#
# Harvard agrees to grant hereunder the limited non-exclusive license
# to End User for the use of the Software in the performance of End
# User’s internal, non-commercial research and academic use at End
# User’s academic or not-for-profit research institution
# (“Institution”) on the following terms and conditions:
#
# 1. NO REDISTRIBUTION. The Software remains the property Harvard,
# Toronto and Socpra, and except as set forth in Section 4, End User
# shall not publish, distribute, or otherwise transfer or make
# available the Software to any other party.
#
# 2. NO COMMERCIAL USE. End User shall not use the Software for
# commercial purposes and any such use of the Software is expressly
# prohibited. This includes, but is not limited to, use of the
# Software in fee-for-service arrangements, core facilities or
# laboratories or to provide research services to (or in collaboration
# with) third parties for a fee, and in industry-sponsored
# collaborative research projects where any commercial rights are
# granted to the sponsor. If End User wishes to use the Software for
# commercial purposes or for any other restricted purpose, End User
# must execute a separate license agreement with Harvard.
#
# Requests for use of the Software for commercial purposes, please
# contact:
#
# Office of Technology Development
# Harvard University
# Smith Campus Center, Suite 727E
# 1350 Massachusetts Avenue
# Cambridge, MA 02138 USA
# Telephone: (617) 495-3067
# Facsimile: (617) 495-9568
# E-mail: otd@harvard.edu
#
# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own
# all intellectual property in the Software. End User shall gain no
# ownership to the Software. End User shall not remove or delete and
# shall retain in the Software, in any modifications to Software and
# in any Derivative Works, the copyright, trademark, or other notices
# pertaining to Software as provided with the Software.
#
# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,
# as such term is defined under U.S. copyright laws, provided that any
# such Derivative Works shall be restricted to non-commercial,
# internal research and academic use at End User’s Institution. End
# User may distribute Derivative Works to other Institutions solely
# for the performance of non-commercial, internal research and
# academic use on terms substantially similar to this License and
# Terms of Use.
#
# 5. FEEDBACK. In order to improve the Software, comments from End
# Users may be useful. End User agrees to provide Harvard with
# feedback on the End User’s use of the Software (e.g., any bugs in
# the Software, the user experience, etc.). Harvard is permitted to
# use such information provided by End User in making changes and
# improvements to the Software without compensation or an accounting
# to End User.
#
# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or
# Sherbrooke or Socpra may develop modifications to the Software that
# may be based on the feedback provided by End User under Section 5
# above. Harvard, Toronto and Sherbrooke/Socpra shall not be
# restricted in any way by End User regarding their use of such
# information. End User acknowledges the right of Harvard, Toronto
# and Sherbrooke/Socpra to prepare, publish, display, reproduce,
# transmit and or use modifications to the Software that may be
# substantially similar or functionally equivalent to End User’s
# modifications and/or improvements if any. In the event that End
# User obtains patent protection for any modification or improvement
# to Software, End User agrees not to allege or enjoin infringement of
# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,
# or any of the researchers, medical or research staff, officers,
# directors and employees of those institutions.
#
# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,
# present, or share results from the use of the Software. In
# accordance with customary academic practice, End User will
# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers
# of the Software and may cite the relevant reference(s) from the
# following list of publications:
#
# Practical Bayesian Optimization of Machine Learning Algorithms
# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams
# Neural Information Processing Systems, 2012
#
# Multi-Task Bayesian Optimization
# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams
# Advances in Neural Information Processing Systems, 2013
#
# Input Warping for Bayesian Optimization of Non-stationary Functions
# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams
# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013
#
# Bayesian Optimization and Semiparametric Models with Applications to
# Assistive Technology Jasper Snoek, PhD Thesis, University of
# Toronto, 2013
#
# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED "AS IS." TO THE FULLEST
# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA
# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR
# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND
# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,
# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE
# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT
# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.
#
# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT
# END USER’S OWN RISK. IF END USER IS DISSATISFIED WITH THE SOFTWARE,
# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL
# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR
# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,
# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER
# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH
# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS
# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,
# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGES.
#
# 10. INDEMNIFICATION. To the extent permitted by law, End User shall
# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke
# and Socpra, their corporate affiliates, current or future directors,
# trustees, officers, faculty, medical and professional staff,
# employees, students and agents and their respective successors,
# heirs and assigns (the "Indemnitees"), against any liability,
# damage, loss or expense (including reasonable attorney's fees and
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import zlib
import numpy as np
COMPRESS_TYPE = 'compressed array'
# TODO: see if there is a better way to encode this than base64
# It takes about 0.65 seconds to compress a 1000x1000 array on a 2011 Macbook air
def compress_array(a):
return {'ctype' : COMPRESS_TYPE,
'shape' : list(a.shape),
'value' : (zlib.compress(a))}#.encode('base64'))}
# It takes about 0.15 seconds to decompress a 1000x1000 array on a 2011 Macbook air
def decompress_array(a):
# return np.fromstring(zlib.decompress(a['value'].decode('base64'))).reshape(a['shape'])
return np.fromstring(zlib.decompress(a['value'])).reshape(a['shape'])
def compress_nested_container(u_container):
if isinstance(u_container, dict):
cdict = {}
for key, value in u_container.items():
if isinstance(value, dict) or isinstance(value, list):
cdict[key] = compress_nested_container(value)
else:
if isinstance(value, np.ndarray):
cdict[key] = compress_array(value)
else:
cdict[key] = value
return cdict
elif isinstance(u_container, list):
clist = []
for value in u_container:
if isinstance(value, dict) or isinstance(value, list):
clist.append(compress_nested_container(value))
else:
if isinstance(value, np.ndarray):
clist.append(compress_array(value))
else:
clist.append(value)
return clist
def decompress_nested_container(c_container):
if isinstance(c_container, dict):
# if c_container.has_key('ctype') and c_container['ctype'] == COMPRESS_TYPE:
if 'ctype' in c_container.keys() and c_container['ctype'] == COMPRESS_TYPE:
try:
return decompress_array(c_container)
except:
raise Exception('Container does not contain a valid array.')
else:
udict = {}
for key, value in c_container.items():
if isinstance(value, dict) or isinstance(value, list):
udict[key] = decompress_nested_container(value)
else:
udict[key] = value
return udict
elif isinstance(c_container, list):
ulist = []
for value in c_container:
if isinstance(value, dict) or isinstance(value, list):
ulist.append(decompress_nested_container(value))
else:
ulist.append(value)
return ulist
def test_compression():
b = np.random.randn(10)
c = np.random.randn(5,1)
e = np.random.randn(2,3)
f = np.random.randn(1,2)
g = np.random.randn(4,2,3)
d = {'a': {'b': b, 'c': c}, 'e': [e,[f,g]]}
dc = compress_nested_container(d)
du = decompress_nested_container(dc)
v1 = [d['a']['b'], d['a']['c'], d['e'][0], d['e'][1][0], d['e'][1][1]]
v2 = [du['a']['b'], du['a']['c'], du['e'][0], du['e'][1][0], du['e'][1][1]]
comp = [np.all(i==j) for i,j in zip(v1,v2)]
return np.all(comp)
if __name__ == '__main__':
test_compression()
| 46.163043 | 90 | 0.728828 |
# expenses of litigation) incurred by or imposed upon the Indemnitees
# or any one of them in connection with any claims, suits, actions,
# demands or judgments arising from End User’s breach of this
# Agreement or its Institution’s use of the Software except to the
# extent caused by the gross negligence or willful misconduct of
# Harvard, Toronto or Sherbrooke or Socpra. This indemnification
# provision shall survive expiration or termination of this Agreement.
#
# 11. GOVERNING LAW. This Agreement shall be construed and governed by
# the laws of the Commonwealth of Massachusetts regardless of
# otherwise applicable choice of law standards.
#
# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall
# be construed as granting End Users or their Institutions any rights
# or licenses to use any trademarks, service marks or logos associated
# with the Software. You may not use the terms “Harvard” or
# “University of Toronto” or “Université de Sherbrooke” or “Socpra
# Sciences et Génie S.E.C.” (or a substantially similar term) in any
# way that is inconsistent with the permitted uses described
# herein. You agree not to use any name or emblem of Harvard, Toronto
# or Sherbrooke, or any of their subdivisions for any purpose, or to
# falsely suggest any relationship between End User (or its
# Institution) and Harvard, Toronto and/or Sherbrooke, or in any
# manner that would infringe or violate any of their rights.
#
# 13. End User represents and warrants that it has the legal authority
# to enter into this License and Terms of Use on behalf of itself and
# its Institution.
import zlib
import numpy as np
COMPRESS_TYPE = 'compressed array'
# TODO: see if there is a better way to encode this than base64
# It takes about 0.65 seconds to compress a 1000x1000 array on a 2011 Macbook air
def compress_array(a):
return {'ctype' : COMPRESS_TYPE,
'shape' : list(a.shape),
'value' : (zlib.compress(a))}#.encode('base64'))}
# It takes about 0.15 seconds to decompress a 1000x1000 array on a 2011 Macbook air
def decompress_array(a):
# return np.fromstring(zlib.decompress(a['value'].decode('base64'))).reshape(a['shape'])
return np.fromstring(zlib.decompress(a['value'])).reshape(a['shape'])
def compress_nested_container(u_container):
if isinstance(u_container, dict):
cdict = {}
for key, value in u_container.items():
if isinstance(value, dict) or isinstance(value, list):
cdict[key] = compress_nested_container(value)
else:
if isinstance(value, np.ndarray):
cdict[key] = compress_array(value)
else:
cdict[key] = value
return cdict
elif isinstance(u_container, list):
clist = []
for value in u_container:
if isinstance(value, dict) or isinstance(value, list):
clist.append(compress_nested_container(value))
else:
if isinstance(value, np.ndarray):
clist.append(compress_array(value))
else:
clist.append(value)
return clist
def decompress_nested_container(c_container):
if isinstance(c_container, dict):
# if c_container.has_key('ctype') and c_container['ctype'] == COMPRESS_TYPE:
if 'ctype' in c_container.keys() and c_container['ctype'] == COMPRESS_TYPE:
try:
return decompress_array(c_container)
except:
raise Exception('Container does not contain a valid array.')
else:
udict = {}
for key, value in c_container.items():
if isinstance(value, dict) or isinstance(value, list):
udict[key] = decompress_nested_container(value)
else:
udict[key] = value
return udict
elif isinstance(c_container, list):
ulist = []
for value in c_container:
if isinstance(value, dict) or isinstance(value, list):
ulist.append(decompress_nested_container(value))
else:
ulist.append(value)
return ulist
def test_compression():
b = np.random.randn(10)
c = np.random.randn(5,1)
e = np.random.randn(2,3)
f = np.random.randn(1,2)
g = np.random.randn(4,2,3)
d = {'a': {'b': b, 'c': c}, 'e': [e,[f,g]]}
dc = compress_nested_container(d)
du = decompress_nested_container(dc)
v1 = [d['a']['b'], d['a']['c'], d['e'][0], d['e'][1][0], d['e'][1][1]]
v2 = [du['a']['b'], du['a']['c'], du['e'][0], du['e'][1][0], du['e'][1][1]]
comp = [np.all(i==j) for i,j in zip(v1,v2)]
return np.all(comp)
if __name__ == '__main__':
test_compression()
| true | true |
f73375827f0ed1a1d4b504b151f3741086c92efb | 18,951 | py | Python | tools/interfacedocgen.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 7 | 2017-02-17T08:54:26.000Z | 2022-03-10T20:57:23.000Z | tools/interfacedocgen.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 2 | 2018-04-17T19:18:16.000Z | 2020-03-04T22:05:02.000Z | tools/interfacedocgen.py | abelalez/nipype | 878271bd906768f11c4cabd04e5d1895551ce8a7 | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Attempt to generate templates for module reference with Sphinx
XXX - we exclude extension modules
To include extension modules, first identify them as valid in the
``_uri2path`` method, then handle them in the ``_parse_module`` script.
We get functions and classes by parsing the text of .py files.
Alternatively we could import the modules for discovery, and we'd have
to do that for extension modules. This would involve changing the
``_parse_module`` method to work via import and introspection, and
might involve changing ``discover_modules`` (which determines which
files are modules, and therefore which module URIs will be passed to
``_parse_module``).
NOTE: this is a modified version of a script originally shipped with the
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
project."""
from __future__ import print_function, unicode_literals
from builtins import object, open
# Stdlib imports
import inspect
import os
import re
import sys
import tempfile
import warnings
from nipype.interfaces.base import BaseInterface
from nipype.pipeline.engine import Workflow
from nipype.utils.misc import trim
from github import get_file_url
# Functions and classes
class InterfaceHelpWriter(object):
''' Class for automatic detection and parsing of API docs
to Sphinx-parsable reST format'''
# only separating first two levels
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
class_skip_patterns=None):
''' Initialize package for parsing
Parameters
----------
package_name : string
Name of the top-level package. *package_name* must be the
name of an importable package
rst_extension : string, optional
Extension for reST files, default '.rst'
package_skip_patterns : None or sequence of {strings, regexps}
Sequence of strings giving URIs of packages to be excluded
Operates on the package path, starting at (including) the
first dot in the package path, after *package_name* - so,
if *package_name* is ``sphinx``, then ``sphinx.util`` will
result in ``.util`` being passed for earching by these
regexps. If is None, gives default. Default is:
['\.tests$']
module_skip_patterns : None or sequence
Sequence of strings giving URIs of modules to be excluded
Operates on the module name including preceding URI path,
back to the first dot after *package_name*. For example
``sphinx.util.console`` results in the string to search of
``.util.console``
If is None, gives default. Default is:
['\.setup$', '\._']
class_skip_patterns : None or sequence
Sequence of strings giving classes to be excluded
Default is: None
'''
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
if class_skip_patterns:
self.class_skip_patterns = class_skip_patterns
else:
self.class_skip_patterns = []
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
'''
# It's also possible to imagine caching the module parsing here
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
''' Get second token in line
>>> docwriter = ApiDocWriter('sphinx')
>>> docwriter._get_object_name(" def func(): ")
u'func'
>>> docwriter._get_object_name(" class Klass(object): ")
'Klass'
>>> docwriter._get_object_name(" class Klass: ")
'Klass'
'''
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
''' Convert uri to absolute filepath
Parameters
----------
uri : string
URI of python module to return path for
Returns
-------
path : None or string
Returns None if there is no valid path for this URI
Otherwise returns absolute file system path for URI
Examples
--------
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> modpath = sphinx.__path__[0]
>>> res = docwriter._uri2path('sphinx.builder')
>>> res == os.path.join(modpath, 'builder.py')
True
>>> res = docwriter._uri2path('sphinx')
>>> res == os.path.join(modpath, '__init__.py')
True
>>> docwriter._uri2path('sphinx.does_not_exist')
'''
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
''' Parse module defined in *uri* '''
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([], [])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f, uri)
f.close()
return functions, classes
def _parse_lines(self, linesource, module):
''' Parse lines of text for functions and classes '''
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_') and \
self._survives_exclude('.'.join((module, name)),
'class'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def _write_graph_section(self, fname, title):
ad = '\n%s\n%s\n\n' % (title, self.rst_section_levels[3] * len(title))
ad += '.. graphviz::\n\n'
fhandle = open(fname)
for line in fhandle:
ad += '\t' + line + '\n'
fhandle.close()
os.remove(fname)
bitmap_fname = '{}.png'.format(os.path.splitext(fname)[0])
os.remove(bitmap_fname)
return ad
def generate_api_doc(self, uri):
'''Make autodoc documentation template string for a module
Parameters
----------
uri : string
python location of module - e.g 'sphinx.builder'
Returns
-------
S : string
Contents of API doc
'''
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
workflows = []
helper_functions = []
for function in functions:
try:
__import__(uri)
finst = sys.modules[uri].__dict__[function]
except TypeError:
continue
try:
workflow = finst()
except Exception:
helper_functions.append((function, finst))
continue
if isinstance(workflow, Workflow):
workflows.append((workflow, function, finst))
if not classes and not workflows and not helper_functions:
print('WARNING: Empty -', uri) # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
# uri_short = uri
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title + '\n' +
self.rst_section_levels[1] * len(chap_title) + '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
# if '.' in uri:
# title = 'Module: :mod:`' + uri_short + '`'
# else:
# title = ':mod:`' + uri_short + '`'
# ad += title + '\n' + self.rst_section_levels[2] * len(title)
# ad += '\n' + 'Classes' + '\n' + \
# self.rst_section_levels[2] * 7 + '\n'
for c in classes:
__import__(uri)
print(c)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classinst = sys.modules[uri].__dict__[c]
except Exception as inst:
print(inst)
continue
if not issubclass(classinst, BaseInterface):
continue
label = uri + '.' + c + ':'
ad += '\n.. _%s\n\n' % label
ad += '\n.. index:: %s\n\n' % c
ad += c + '\n' + self.rst_section_levels[2] * len(c) + '\n\n'
ad += "`Link to code <%s>`__\n\n" % get_file_url(classinst)
ad += trim(
classinst.help(returnhelp=True),
self.rst_section_levels[3]) + '\n'
if workflows or helper_functions:
ad += '\n.. module:: %s\n\n' % uri
for workflow, name, finst in workflows:
label = ':func:`' + name + '`'
ad += '\n.. _%s:\n\n' % (uri + '.' + name)
ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
helpstr = trim(finst.__doc__, self.rst_section_levels[3])
ad += '\n\n' + helpstr + '\n\n'
"""
# use sphinx autodoc for function signature
ad += '\n.. _%s:\n\n' % (uri + '.' + name)
ad += '.. autofunction:: %s\n\n' % name
"""
(_, fname) = tempfile.mkstemp(suffix=".dot")
workflow.write_graph(dotfilename=fname, graph2use='hierarchical')
ad += self._write_graph_section(fname, 'Graph') + '\n'
for name, finst in helper_functions:
label = ':func:`' + name + '`'
ad += '\n.. _%s:\n\n' % (uri + '.' + name)
ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
helpstr = trim(finst.__doc__, self.rst_section_levels[3])
ad += '\n\n' + helpstr + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
''' Returns True if *matchstr* does not match patterns
``self.package_name`` removed from front of string if present
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> dw._survives_exclude('sphinx.okpkg', 'package')
True
>>> dw.package_skip_patterns.append('^\\.badpkg$')
>>> dw._survives_exclude('sphinx.badpkg', 'package')
False
>>> dw._survives_exclude('sphinx.badpkg', 'module')
True
>>> dw._survives_exclude('sphinx.badmod', 'module')
True
>>> dw.module_skip_patterns.append('^\\.badmod$')
>>> dw._survives_exclude('sphinx.badmod', 'module')
False
'''
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
elif match_type == 'class':
patterns = self.class_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"' % match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
>>> mods = dw.discover_modules()
>>> 'sphinx.util' in mods
True
>>> dw.package_skip_patterns.append('\.util$')
>>> 'sphinx.util' in dw.discover_modules()
False
>>>
'''
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri)
and self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri)
and self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules, outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
mvalues = m.split('.')
if len(mvalues) > 3:
index_prefix = '.'.join(mvalues[1:3])
index_dir = os.path.join(outdir, index_prefix)
index_file = index_dir + self.rst_extension
if not os.path.exists(index_dir):
os.makedirs(index_dir)
header = """.. AUTO-GENERATED FILE -- DO NOT EDIT!
{name}
{underline}
.. toctree::
:maxdepth: 1
:glob:
{name}/*
""".format(
name=index_prefix, underline='=' * len(index_prefix))
with open(index_file, 'wt') as fp:
fp.write(header)
m = os.path.join(index_prefix, '.'.join(mvalues[3:]))
outfile = os.path.join(outdir, m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
"""Generate API reST files.
Parameters
----------
outdir : string
Directory name in which to store files
We create automatic filenames for each module
Returns
-------
None
Notes
-----
Sets self.written_modules to list of written modules
"""
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
"""Make a reST API index file from written files
Parameters
----------
path : string
Filename to write index to
outdir : string
Directory to which to write generated index file
froot : string, optional
root (filename without extension) of filename to write to
Defaults to 'gen'. We add ``self.rst_extension``.
relative_to : string
path to which written filenames are relative. This
component of the written file path will be removed from
outdir, in the generated index. Default is None, meaning,
leave path as it is.
"""
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot + self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path, 'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n')
w(' :maxdepth: 2\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath, f))
idx.close()
| 35.892045 | 79 | 0.554905 |
from __future__ import print_function, unicode_literals
from builtins import object, open
import inspect
import os
import re
import sys
import tempfile
import warnings
from nipype.interfaces.base import BaseInterface
from nipype.pipeline.engine import Workflow
from nipype.utils.misc import trim
from github import get_file_url
class InterfaceHelpWriter(object):
rst_section_levels = ['*', '=', '-', '~', '^']
def __init__(self,
package_name,
rst_extension='.rst',
package_skip_patterns=None,
module_skip_patterns=None,
class_skip_patterns=None):
if package_skip_patterns is None:
package_skip_patterns = ['\\.tests$']
if module_skip_patterns is None:
module_skip_patterns = ['\\.setup$', '\\._']
if class_skip_patterns:
self.class_skip_patterns = class_skip_patterns
else:
self.class_skip_patterns = []
self.package_name = package_name
self.rst_extension = rst_extension
self.package_skip_patterns = package_skip_patterns
self.module_skip_patterns = module_skip_patterns
def get_package_name(self):
return self._package_name
def set_package_name(self, package_name):
self._package_name = package_name
self.root_module = __import__(package_name)
self.root_path = self.root_module.__path__[0]
self.written_modules = None
package_name = property(get_package_name, set_package_name, None,
'get/set package_name')
def _get_object_name(self, line):
name = line.split()[1].split('(')[0].strip()
# in case we have classes which are not derived from object
# ie. old style classes
return name.rstrip(':')
def _uri2path(self, uri):
if uri == self.package_name:
return os.path.join(self.root_path, '__init__.py')
path = uri.replace('.', os.path.sep)
path = path.replace(self.package_name + os.path.sep, '')
path = os.path.join(self.root_path, path)
# XXX maybe check for extensions as well?
if os.path.exists(path + '.py'): # file
path += '.py'
elif os.path.exists(os.path.join(path, '__init__.py')):
path = os.path.join(path, '__init__.py')
else:
return None
return path
def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.')
def _parse_module(self, uri):
filename = self._uri2path(uri)
if filename is None:
# nothing that we could handle here.
return ([], [])
f = open(filename, 'rt')
functions, classes = self._parse_lines(f, uri)
f.close()
return functions, classes
def _parse_lines(self, linesource, module):
functions = []
classes = []
for line in linesource:
if line.startswith('def ') and line.count('('):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_'):
functions.append(name)
elif line.startswith('class '):
# exclude private stuff
name = self._get_object_name(line)
if not name.startswith('_') and \
self._survives_exclude('.'.join((module, name)),
'class'):
classes.append(name)
else:
pass
functions.sort()
classes.sort()
return functions, classes
def _write_graph_section(self, fname, title):
ad = '\n%s\n%s\n\n' % (title, self.rst_section_levels[3] * len(title))
ad += '.. graphviz::\n\n'
fhandle = open(fname)
for line in fhandle:
ad += '\t' + line + '\n'
fhandle.close()
os.remove(fname)
bitmap_fname = '{}.png'.format(os.path.splitext(fname)[0])
os.remove(bitmap_fname)
return ad
def generate_api_doc(self, uri):
# get the names of all classes and functions
functions, classes = self._parse_module(uri)
workflows = []
helper_functions = []
for function in functions:
try:
__import__(uri)
finst = sys.modules[uri].__dict__[function]
except TypeError:
continue
try:
workflow = finst()
except Exception:
helper_functions.append((function, finst))
continue
if isinstance(workflow, Workflow):
workflows.append((workflow, function, finst))
if not classes and not workflows and not helper_functions:
print('WARNING: Empty -', uri) # dbg
return ''
# Make a shorter version of the uri that omits the package name for
# titles
uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)
# uri_short = uri
ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
chap_title = uri_short
ad += (chap_title + '\n' +
self.rst_section_levels[1] * len(chap_title) + '\n\n')
# Set the chapter title to read 'module' for all modules except for the
# main packages
# if '.' in uri:
# title = 'Module: :mod:`' + uri_short + '`'
# else:
# title = ':mod:`' + uri_short + '`'
# ad += title + '\n' + self.rst_section_levels[2] * len(title)
# ad += '\n' + 'Classes' + '\n' + \
# self.rst_section_levels[2] * 7 + '\n'
for c in classes:
__import__(uri)
print(c)
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
classinst = sys.modules[uri].__dict__[c]
except Exception as inst:
print(inst)
continue
if not issubclass(classinst, BaseInterface):
continue
label = uri + '.' + c + ':'
ad += '\n.. _%s\n\n' % label
ad += '\n.. index:: %s\n\n' % c
ad += c + '\n' + self.rst_section_levels[2] * len(c) + '\n\n'
ad += "`Link to code <%s>`__\n\n" % get_file_url(classinst)
ad += trim(
classinst.help(returnhelp=True),
self.rst_section_levels[3]) + '\n'
if workflows or helper_functions:
ad += '\n.. module:: %s\n\n' % uri
for workflow, name, finst in workflows:
label = ':func:`' + name + '`'
ad += '\n.. _%s:\n\n' % (uri + '.' + name)
ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
helpstr = trim(finst.__doc__, self.rst_section_levels[3])
ad += '\n\n' + helpstr + '\n\n'
(_, fname) = tempfile.mkstemp(suffix=".dot")
workflow.write_graph(dotfilename=fname, graph2use='hierarchical')
ad += self._write_graph_section(fname, 'Graph') + '\n'
for name, finst in helper_functions:
label = ':func:`' + name + '`'
ad += '\n.. _%s:\n\n' % (uri + '.' + name)
ad += '\n'.join((label, self.rst_section_levels[2] * len(label)))
ad += "\n\n`Link to code <%s>`__\n\n" % get_file_url(finst)
helpstr = trim(finst.__doc__, self.rst_section_levels[3])
ad += '\n\n' + helpstr + '\n\n'
return ad
def _survives_exclude(self, matchstr, match_type):
if match_type == 'module':
patterns = self.module_skip_patterns
elif match_type == 'package':
patterns = self.package_skip_patterns
elif match_type == 'class':
patterns = self.class_skip_patterns
else:
raise ValueError('Cannot interpret match type "%s"' % match_type)
# Match to URI without package name
L = len(self.package_name)
if matchstr[:L] == self.package_name:
matchstr = matchstr[L:]
for pat in patterns:
try:
pat.search
except AttributeError:
pat = re.compile(pat)
if pat.search(matchstr):
return False
return True
def discover_modules(self):
modules = [self.package_name]
# raw directory parsing
for dirpath, dirnames, filenames in os.walk(self.root_path):
# Check directory names for packages
root_uri = self._path2uri(os.path.join(self.root_path, dirpath))
for dirname in dirnames[:]: # copy list - we modify inplace
package_uri = '.'.join((root_uri, dirname))
if (self._uri2path(package_uri)
and self._survives_exclude(package_uri, 'package')):
modules.append(package_uri)
else:
dirnames.remove(dirname)
# Check filenames for modules
for filename in filenames:
module_name = filename[:-3]
module_uri = '.'.join((root_uri, module_name))
if (self._uri2path(module_uri)
and self._survives_exclude(module_uri, 'module')):
modules.append(module_uri)
return sorted(modules)
def write_modules_api(self, modules, outdir):
# write the list
written_modules = []
for m in modules:
api_str = self.generate_api_doc(m)
if not api_str:
continue
# write out to file
mvalues = m.split('.')
if len(mvalues) > 3:
index_prefix = '.'.join(mvalues[1:3])
index_dir = os.path.join(outdir, index_prefix)
index_file = index_dir + self.rst_extension
if not os.path.exists(index_dir):
os.makedirs(index_dir)
header = """.. AUTO-GENERATED FILE -- DO NOT EDIT!
{name}
{underline}
.. toctree::
:maxdepth: 1
:glob:
{name}/*
""".format(
name=index_prefix, underline='=' * len(index_prefix))
with open(index_file, 'wt') as fp:
fp.write(header)
m = os.path.join(index_prefix, '.'.join(mvalues[3:]))
outfile = os.path.join(outdir, m + self.rst_extension)
fileobj = open(outfile, 'wt')
fileobj.write(api_str)
fileobj.close()
written_modules.append(m)
self.written_modules = written_modules
def write_api_docs(self, outdir):
if not os.path.exists(outdir):
os.mkdir(outdir)
# compose list of modules
modules = self.discover_modules()
self.write_modules_api(modules, outdir)
def write_index(self, outdir, froot='gen', relative_to=None):
if self.written_modules is None:
raise ValueError('No modules written')
# Get full filename path
path = os.path.join(outdir, froot + self.rst_extension)
# Path written into index is relative to rootpath
if relative_to is not None:
relpath = outdir.replace(relative_to + os.path.sep, '')
else:
relpath = outdir
idx = open(path, 'wt')
w = idx.write
w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
w('.. toctree::\n')
w(' :maxdepth: 2\n\n')
for f in self.written_modules:
w(' %s\n' % os.path.join(relpath, f))
idx.close()
| true | true |
f733758c28429f084545854d7df87417d20a7c1b | 12,589 | py | Python | unlp/unsupervised/Word2Vec/get_file.py | Hanscal/unlp | 93a630cac7957f1ddd38f34403ec6577a277e10a | [
"MIT"
] | 8 | 2022-02-23T08:41:26.000Z | 2022-03-14T11:42:51.000Z | unlp/unsupervised/Word2Vec/get_file.py | Hanscal/unlp | 93a630cac7957f1ddd38f34403ec6577a277e10a | [
"MIT"
] | null | null | null | unlp/unsupervised/Word2Vec/get_file.py | Hanscal/unlp | 93a630cac7957f1ddd38f34403ec6577a277e10a | [
"MIT"
] | 2 | 2022-03-09T01:50:40.000Z | 2022-03-21T09:23:09.000Z | # -*- coding: utf-8 -*-
"""
@description: Download file.
"""
import hashlib
import os
import shutil
import sys
import tarfile
import time
import typing
import zipfile
from pathlib import Path
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlretrieve
class Progbar(object):
"""
Displays a progress bar.
:param target: Total number of steps expected, None if unknown.
:param width: Progress bar width on screen.
:param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
:param stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
:param interval: Minimum visual progress update interval (in seconds).
"""
def __init__(
self,
target,
width=30,
verbose=1,
interval=0.05,
):
"""Init."""
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self._dynamic_display = ((hasattr(sys.stdout,
'isatty') and sys.stdout.isatty()
) or 'ipykernel' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
self._start = time.time()
self._last_update = 0
def update(self, current):
"""Updates the progress bar."""
self._seen_so_far = current
now = time.time()
info = ' - {0:.0f}s'.format(now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and self.target is not
None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
bar = '{2:{0:d}d}/{1} ['.format(
numdigits, self.target, current)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '{0:7d}/Unknown'.format(current)
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = int(time_per_unit * (self.target - current))
if eta > 3600:
eta_format = ('{0:d}:{1:02d}:{2:02d}'.format(
eta // 3600, (eta % 3600) // 60, eta % 60))
elif eta > 60:
eta_format = '{0:d}:{1:02d}'.format(eta // 60, eta % 60)
else:
eta_format = '{0:d}s'.format(eta)
info = ' - ETA: {0}'.format(eta_format)
else:
if time_per_unit >= 1:
info += ' {0:.0f}s/step'.format(time_per_unit)
elif time_per_unit >= 1e-3:
info += ' {0:.0f}ms/step'.format(time_per_unit * 1e3)
else:
info += ' {0:.0f}us/step'.format(time_per_unit * 1e6)
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def _extract_archive(file_path, path='.', archive_format='auto'):
"""
Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
:param file_path: path to the archive file
:param path: path to extract the archive file
:param archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
:return: True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(
fname: str = None,
origin: str = None,
untar: bool = False,
extract: bool = False,
md5_hash: typing.Any = None,
file_hash: typing.Any = None,
hash_algorithm: str = 'auto',
archive_format: str = 'auto',
cache_subdir: typing.Union[Path, str] = 'data',
cache_dir: typing.Union[Path, str] = 'dataset',
verbose: int = 1
) -> str:
"""
Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.project/datasets`, placed in the cache_subdir `data`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.project/datasets/data/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
:param fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
:param origin: Original URL of the file.
:param untar: Deprecated in favor of 'extract'. Boolean, whether the file
should be decompressed.
:param md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file
for verification.
:param file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
:param cache_subdir: Subdirectory under the cache dir where the file is
saved. If an absolute path `/path/to/folder` is specified the file
will be saved at that location.
:param hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'. The default 'auto' detects
the hash algorithm in use.
:papram extract: True tries extracting the file as an Archive, like tar
or zip.
:param archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
:param cache_dir: Location to store cached files, when None it defaults to
the [project.USER_DATA_DIR](~/.project/datasets).
:param verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
:return: Path to the downloaded file.
"""
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.text2vec')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the file hash '
'does not match the original value of file_hash.'
' We will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(
target=total_size, verbose=verbose)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {} : {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt):
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """
    Validates a file against a sha256 or md5 hash.
    :param fpath: path to the file being validated
    :param file_hash: The expected hash string of the file.
        The sha256 and md5 hash algorithms are both supported.
    :param algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
        The default 'auto' detects the hash algorithm in use.
    :param chunk_size: Bytes to read at a time, important for large files.
    :return: Whether the file is valid.
    """
    # 'auto' assumes sha256 when the expected digest has sha256's hex length.
    if algorithm == 'sha256' or (algorithm == 'auto' and len(file_hash) == 64):
        hasher = 'sha256'
    else:
        hasher = 'md5'
    # Return the comparison directly instead of an if/else returning
    # True/False; str() keeps bytes/unicode digests comparable.
    return str(hash_file(fpath, hasher, chunk_size)) == str(file_hash)
def hash_file(fpath, algorithm='sha256', chunk_size=65535):
    """
    Calculates a file sha256 or md5 hash.
    :param fpath: path to the file being validated
    :param algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
        The default 'auto' detects the hash algorithm in use.
    :param chunk_size: Bytes to read at a time, important for large files.
    :return: The file hash (hex digest string).
    """
    hasher = hashlib.sha256() if algorithm == 'sha256' else hashlib.md5()
    # Stream the file in fixed-size chunks to keep memory bounded.
    with open(fpath, 'rb') as fpath_file:
        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
| 35.866097 | 79 | 0.57447 |
import hashlib
import os
import shutil
import sys
import tarfile
import time
import typing
import zipfile
from pathlib import Path
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlretrieve
class Progbar(object):
    """Console progress bar (Keras-style) that redraws itself in place."""
    def __init__(
            self,
            target,
            width=30,
            verbose=1,
            interval=0.05,
    ):
        # target: total number of steps expected (None if unknown).
        # width: character width of the drawn bar.
        # verbose: 1 = interactive bar, 2 = final summary line only.
        # interval: minimum seconds between visual updates.
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        # In-place redrawing only works on a real terminal; Jupyter's
        # ipykernel output also understands carriage returns.
        self._dynamic_display = ((hasattr(sys.stdout,
                                          'isatty') and sys.stdout.isatty()
                                  ) or 'ipykernel' in sys.modules)
        self._total_width = 0
        self._seen_so_far = 0
        self._start = time.time()
        self._last_update = 0
    def update(self, current):
        """Redraw the bar to reflect ``current`` completed steps."""
        self._seen_so_far = current
        now = time.time()
        info = ' - {0:.0f}s'.format(now - self._start)
        if self.verbose == 1:
            # Throttle redraws to at most one per `interval` seconds;
            # the final update (current >= target) is always drawn.
            if (now - self._last_update < self.interval and self.target is not
                    None and current < self.target):
                return
            prev_total_width = self._total_width
            if self._dynamic_display:
                # Erase the previous line and return to column 0.
                sys.stdout.write('\b' * prev_total_width)
                sys.stdout.write('\r')
            else:
                sys.stdout.write('\n')
            if self.target is not None:
                # Fixed-width "current/target [====>....]" bar.
                numdigits = int(np.floor(np.log10(self.target))) + 1
                bar = '{2:{0:d}d}/{1} ['.format(
                    numdigits, self.target, current)
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += ('=' * (prog_width - 1))
                    if current < self.target:
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = '{0:7d}/Unknown'.format(current)
            self._total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self._start) / current
            else:
                time_per_unit = 0
            if self.target is not None and current < self.target:
                # Estimate remaining time; format as h:mm:ss, m:ss or Ns.
                eta = int(time_per_unit * (self.target - current))
                if eta > 3600:
                    eta_format = ('{0:d}:{1:02d}:{2:02d}'.format(
                        eta // 3600, (eta % 3600) // 60, eta % 60))
                elif eta > 60:
                    eta_format = '{0:d}:{1:02d}'.format(eta // 60, eta % 60)
                else:
                    eta_format = '{0:d}s'.format(eta)
                info = ' - ETA: {0}'.format(eta_format)
            else:
                # Finished (or unknown target): report per-step duration.
                if time_per_unit >= 1:
                    info += ' {0:.0f}s/step'.format(time_per_unit)
                elif time_per_unit >= 1e-3:
                    info += ' {0:.0f}ms/step'.format(time_per_unit * 1e3)
                else:
                    info += ' {0:.0f}us/step'.format(time_per_unit * 1e6)
            self._total_width += len(info)
            if prev_total_width > self._total_width:
                # Pad with spaces so leftovers from a longer line are erased.
                info += (' ' * (prev_total_width - self._total_width))
            if self.target is not None and current >= self.target:
                info += '\n'
            sys.stdout.write(info)
            sys.stdout.flush()
        elif self.verbose == 2:
            # Quiet mode: only emit a summary once the run completes.
            if self.target is None or current >= self.target:
                info += '\n'
                sys.stdout.write(info)
                sys.stdout.flush()
        self._last_update = now
def _extract_archive(file_path, path='.', archive_format='auto'):
    """Extract a tar or zip archive at `file_path` into directory `path`.

    :param archive_format: 'tar', 'zip', 'auto' (try both), a list of
        those names, or None to disable extraction.
    :return: True if an archive was matched and extracted, else False.
    """
    if archive_format is None:
        return False
    if archive_format == 'auto':
        archive_format = ['tar', 'zip']
    if isinstance(archive_format, six.string_types):
        archive_format = [archive_format]
    for archive_type in archive_format:
        if archive_type == 'tar':
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        elif archive_type == 'zip':
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile
        else:
            # Bug fix: the original fell through for unknown format names,
            # leaving open_fn/is_match_fn unbound (NameError on the first
            # iteration) or silently reusing the previous type's handlers.
            continue
        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError,
                        KeyboardInterrupt):
                    # Remove partial extraction output before re-raising.
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False
def get_file(
        fname: str = None,
        origin: str = None,
        untar: bool = False,
        extract: bool = False,
        md5_hash: typing.Any = None,
        file_hash: typing.Any = None,
        hash_algorithm: str = 'auto',
        archive_format: str = 'auto',
        cache_subdir: typing.Union[Path, str] = 'data',
        cache_dir: typing.Union[Path, str] = 'dataset',
        verbose: int = 1
) -> str:
    """Download `origin` into a local cache and return the local path.

    A cached copy is reused when its hash matches `file_hash`; otherwise
    the file is re-downloaded. Optionally extracts tar/zip archives.
    Returns the path to the (extracted, if `untar`) file.
    """
    # `md5_hash` is the legacy argument; fold it into `file_hash`.
    if md5_hash is not None and file_hash is None:
        file_hash = md5_hash
        hash_algorithm = 'md5'
    datadir_base = os.path.expanduser(cache_dir)
    if not os.access(datadir_base, os.W_OK):
        # Fall back to a writable location when cache_dir is read-only.
        datadir_base = os.path.join('/tmp', '.text2vec')
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    if untar:
        # The archive itself lives next to the extracted target.
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)
    download = False
    if os.path.exists(fpath):
        # Cached copy exists; re-download only if its hash mismatches.
        if file_hash is not None:
            if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated because the file hash '
                      'does not match the original value of file_hash.'
                      ' We will re-download the data.')
                download = True
    else:
        download = True
    if download:
        print('Downloading data from', origin)
        class ProgressTracker(object):
            # Holds the Progbar instance across dl_progress callbacks.
            progbar = None
        def dl_progress(count, block_size, total_size):
            # urlretrieve reporthook: first call creates the bar,
            # subsequent calls advance it by bytes transferred.
            if ProgressTracker.progbar is None:
                if total_size == -1:
                    total_size = None
                ProgressTracker.progbar = Progbar(
                    target=total_size, verbose=verbose)
            else:
                ProgressTracker.progbar.update(count * block_size)
        error_msg = 'URL fetch failure on {} : {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # Never leave a partial download in the cache.
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        ProgressTracker.progbar = None
    if untar:
        if not os.path.exists(untar_fpath):
            _extract_archive(fpath, datadir, archive_format='tar')
        return untar_fpath
    if extract:
        _extract_archive(fpath, datadir, archive_format)
    return fpath
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """Return True if the file at `fpath` matches the expected hash.

    :param algorithm: 'auto', 'sha256', or 'md5'; 'auto' assumes sha256
        when the expected digest has sha256's 64-char hex length.
    """
    if algorithm == 'sha256' or (algorithm == 'auto' and len(file_hash) == 64):
        hasher = 'sha256'
    else:
        hasher = 'md5'
    # Return the comparison directly (the if/else returning True/False
    # was redundant); str() keeps bytes/unicode digests comparable.
    return str(hash_file(fpath, hasher, chunk_size)) == str(file_hash)
def hash_file(fpath, algorithm='sha256', chunk_size=65535):
    """Return the hex digest of the file using sha256 (default) or md5.

    Reads in `chunk_size` blocks so memory stays bounded for large files.
    """
    hasher = hashlib.sha256() if algorithm == 'sha256' else hashlib.md5()
    with open(fpath, 'rb') as fpath_file:
        for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
| true | true |
f7337830bfdf6964254436e9e7154667341067f2 | 206 | py | Python | programs/models/__init__.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | 2 | 2020-09-04T17:06:41.000Z | 2020-10-05T01:46:20.000Z | programs/models/__init__.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | null | null | null | programs/models/__init__.py | bycristhian/psp | 019825e010386b6acc8c5466e7a6765218cb10d9 | [
"MIT"
] | null | null | null |
from .languages import ProgrammingLanguage
from .estimations import Estimation, SizeEstimation, TypePart
from .programs import Program, Report, Pip
from .parts_of_code import ReusedPart, BasePart, NewPart
| 34.333333 | 61 | 0.839806 |
from .languages import ProgrammingLanguage
from .estimations import Estimation, SizeEstimation, TypePart
from .programs import Program, Report, Pip
from .parts_of_code import ReusedPart, BasePart, NewPart
| true | true |
f73378aca4b59d93f62b1204f4f20afa24aae66e | 8,678 | py | Python | scripts/input_converter.py | hahahawu/Tagger | 180a0412abf571797638d024b8dacf9d776ee6f9 | [
"BSD-3-Clause"
] | 2 | 2019-04-21T12:04:38.000Z | 2019-07-11T06:40:59.000Z | scripts/input_converter.py | hahahawu/Tagger | 180a0412abf571797638d024b8dacf9d776ee6f9 | [
"BSD-3-Clause"
] | null | null | null | scripts/input_converter.py | hahahawu/Tagger | 180a0412abf571797638d024b8dacf9d776ee6f9 | [
"BSD-3-Clause"
] | null | null | null | # input_converter.py
# author: Playinf
# email: playinf@stu.xmu.edu.cn
import os
import six
import json
import random
import argparse
import tensorflow as tf
def load_vocab(filename):
    """Load a vocabulary file into a dict mapping token -> line index.

    Bug fix: the original opened the file without a context manager, so
    the handle leaked if an exception occurred while reading.
    """
    vocab = {}
    with open(filename, "r") as fd:
        for count, line in enumerate(fd):
            vocab[line.strip()] = count
    return vocab
def to_json(dictionary):
    """ Convert python dictionary to JSON format """
    serialized = json.dumps(dictionary)
    return serialized
def to_dictionary(example):
    """ Convert JSON/tf.train.Example to python dictionary """
    if isinstance(example, str):
        # JSON string: delegate straight to the standard parser.
        return json.loads(example)
    if not isinstance(example, tf.train.Example):
        raise ValueError("Unsupported format")
    dictionary = {}
    feature_map = example.features.feature
    for (k, v) in zip(feature_map.keys(), feature_map.values()):
        # Exactly one of the three typed lists should be populated.
        int64_list = list(v.int64_list.value)
        float_list = list(v.float_list.value)
        bytes_list = list(v.bytes_list.value)
        if int64_list:
            dictionary[k] = int64_list
        elif float_list:
            dictionary[k] = float_list
        elif bytes_list:
            dictionary[k] = bytes_list
        else:
            raise ValueError("All lists are empty.")
    return dictionary
def to_example(dictionary):
    """ Convert python dictionary to tf.train.Example """
    features = {}
    for (k, v) in six.iteritems(dictionary):
        if not v:
            # Bug fix: the original passed the format string and the value
            # as two separate ValueError arguments, so "%s" was never
            # interpolated into the message.
            raise ValueError("Empty generated field: %s" % str((k, v)))
        # Dispatch on the first element's type to pick the feature kind.
        if isinstance(v[0], six.integer_types):
            int64_list = tf.train.Int64List(value=v)
            features[k] = tf.train.Feature(int64_list=int64_list)
        elif isinstance(v[0], float):
            float_list = tf.train.FloatList(value=v)
            features[k] = tf.train.Feature(float_list=float_list)
        elif isinstance(v[0], six.string_types):
            bytes_list = tf.train.BytesList(value=v)
            features[k] = tf.train.Feature(bytes_list=bytes_list)
        else:
            raise ValueError("Value is neither an int nor a float; "
                             "v: %s type: %s" % (str(v[0]), str(type(v[0]))))
    return tf.train.Example(features=tf.train.Features(feature=features))
def read_records(filename):
    """ Read TensorFlow record """
    records = []
    for record in tf.python_io.tf_record_iterator(filename):
        records.append(record)
        # Log progress every 10k records so long reads stay visible.
        if len(records) % 10000 == 0:
            tf.logging.info("read: %d", len(records))
    return records
def write_records(records, out_filename):
    """ Write to TensorFlow record """
    writer = tf.python_io.TFRecordWriter(out_filename)
    count = 0
    for record in records:
        writer.write(record)
        # Log progress every 10k records.
        if count % 10000 == 0:
            tf.logging.info("write: %d", count)
        count += 1
    writer.close()
def convert_record_to_json(pattern, output_name, output_dir, num_shards=1):
    """ Convert TensorFlow record to JSON format """
    output_files = []
    writers = []
    # Python 3 compatibility fix: `xrange` only exists on Python 2;
    # `range` behaves identically here on both versions.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.gfile.GFile(output_file, "w"))
    filenames = tf.gfile.Glob(pattern)
    records = []
    for filename in filenames:
        records.extend(read_records(filename))
    # Distribute records round-robin across the shard writers.
    counter, shard = 0, 0
    for record in records:
        counter += 1
        example = tf.train.Example()
        example.ParseFromString(record)
        features = to_dictionary(example)
        json_str = to_json(features)
        writers[shard].write(json_str + "\n")
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
# format:
# pred-pos tokens ||| labels
def convert_plain_to_json(name, vocabs, output_name, output_dir, num_shards,
                          lower=True, shuffle=True):
    """ Convert plain SRL data to sharded JSON files.

    Note: the original docstring said "TensorFlow record", but this
    function writes JSON lines; see convert_plain_to_record for TFRecords.
    """
    vocab_token = load_vocab(vocabs[0])
    vocab_label = load_vocab(vocabs[1])
    records = []
    unk = vocab_token["<unk>"]
    with open(name) as fd:
        for line in fd:
            # Each line is "pred-pos token token ... ||| label label ..."
            features, labels = line.strip().split("|||")
            features = features.strip().split(" ")
            labels = labels.strip().split(" ")
            pred_pos = features[0]
            inputs = features[1:]
            if lower:
                inputs = [item.lower() for item in inputs]
            inputs = [vocab_token[item] if item in vocab_token else unk
                      for item in inputs]
            labels = [vocab_label[item] for item in labels]
            # One-hot vector marking the predicate position.
            preds = [0 for _ in inputs]
            preds[int(pred_pos)] = 1
            feature = {
                "inputs": inputs,
                "preds": preds,
                "targets": labels
            }
            records.append(feature)
    if shuffle:
        random.shuffle(records)
    writers = []
    output_files = []
    # Python 3 compatibility fix: range instead of the Python-2-only xrange.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.gfile.GFile(output_file, "w"))
    # Round-robin the examples across shards.
    counter, shard = 0, 0
    for record in records:
        counter += 1
        features = record
        json_str = to_json(features)
        writers[shard].write(json_str + "\n")
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
# format:
# pred-pos tokens ||| labels
def convert_plain_to_record(name, vocabs, output_name, output_dir, num_shards,
                            lower=True, shuffle=True):
    """ Convert plain SRL data to TensorFlow record """
    vocab_token = load_vocab(vocabs[0])
    vocab_label = load_vocab(vocabs[1])
    records = []
    unk = vocab_token["<unk>"]
    with open(name) as fd:
        for line in fd:
            # Each line is "pred-pos token token ... ||| label label ..."
            features, labels = line.strip().split("|||")
            features = features.strip().split()
            labels = labels.strip().split()
            pred_pos = features[0]
            inputs = features[1:]
            if lower:
                inputs = [item.lower() for item in inputs]
            inputs = [vocab_token[item] if item in vocab_token else unk
                      for item in inputs]
            labels = [vocab_label[item] for item in labels]
            # One-hot vector marking the predicate position.
            preds = [0 for _ in inputs]
            preds[int(pred_pos)] = 1
            feature = {
                "inputs": inputs,
                "preds": preds,
                "targets": labels
            }
            records.append(feature)
    if shuffle:
        random.shuffle(records)
    output_files = []
    writers = []
    # Python 3 compatibility fix: range instead of the Python-2-only xrange.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.python_io.TFRecordWriter(output_file))
    # Round-robin the examples across shards.
    counter, shard = 0, 0
    for record in records:
        counter += 1
        example = to_example(record)
        writers[shard].write(example.SerializeToString())
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
def parse_args():
    """Define and parse the converter's command-line interface."""
    parser = argparse.ArgumentParser(
        description="convert srl data to TensorFlow record format",
        usage="srl_input_converter.py [<args>] [-h | --help]")
    parser.add_argument("--input_path", required=True, type=str,
                        help="path of source file")
    parser.add_argument("--output_name", required=True, type=str,
                        help="output name")
    parser.add_argument("--output_dir", required=True, type=str,
                        help="output directory")
    parser.add_argument("--vocab", type=str, nargs=2,
                        help="path of vocabulary")
    parser.add_argument("--num_shards", default=100, type=int,
                        help="number of output shards")
    parser.add_argument("--shuffle", action="store_true",
                        help="shuffle inputs")
    parser.add_argument("--lower", action="store_true",
                        help="use lowercase")
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: parse the CLI options and convert the plain-text
    # SRL corpus into sharded TFRecord files.
    args = parse_args()
    convert_plain_to_record(args.input_path, args.vocab, args.output_name,
                            args.output_dir, args.num_shards, args.lower,
                            args.shuffle)
| 30.131944 | 78 | 0.595414 |
import os
import six
import json
import random
import argparse
import tensorflow as tf
def load_vocab(filename):
    """Load a vocabulary file into a dict mapping token -> line index.

    Bug fix: the original opened the file without a context manager, so
    the handle leaked if an exception occurred while reading.
    """
    vocab = {}
    with open(filename, "r") as fd:
        for count, line in enumerate(fd):
            vocab[line.strip()] = count
    return vocab
def to_json(dictionary):
    """Serialize a python dictionary into a JSON string."""
    return json.dumps(dictionary)
def to_dictionary(example):
    """Convert a JSON string or tf.train.Example to a python dictionary."""
    if isinstance(example, str):
        # JSON string: delegate straight to the standard parser.
        return json.loads(example)
    if not isinstance(example, tf.train.Example):
        raise ValueError("Unsupported format")
    dictionary = {}
    feature_map = example.features.feature
    for (k, v) in zip(feature_map.keys(), feature_map.values()):
        # Exactly one of the three typed lists should be populated.
        int64_list = list(v.int64_list.value)
        float_list = list(v.float_list.value)
        bytes_list = list(v.bytes_list.value)
        if int64_list:
            dictionary[k] = int64_list
        elif float_list:
            dictionary[k] = float_list
        elif bytes_list:
            dictionary[k] = bytes_list
        else:
            raise ValueError("All lists are empty.")
    return dictionary
def to_example(dictionary):
    """Convert a python dictionary of lists to a tf.train.Example."""
    features = {}
    for (k, v) in six.iteritems(dictionary):
        if not v:
            # Bug fix: the original passed the format string and the value
            # as two separate ValueError arguments, so "%s" was never
            # interpolated into the message.
            raise ValueError("Empty generated field: %s" % str((k, v)))
        # Dispatch on the first element's type to pick the feature kind.
        if isinstance(v[0], six.integer_types):
            int64_list = tf.train.Int64List(value=v)
            features[k] = tf.train.Feature(int64_list=int64_list)
        elif isinstance(v[0], float):
            float_list = tf.train.FloatList(value=v)
            features[k] = tf.train.Feature(float_list=float_list)
        elif isinstance(v[0], six.string_types):
            bytes_list = tf.train.BytesList(value=v)
            features[k] = tf.train.Feature(bytes_list=bytes_list)
        else:
            raise ValueError("Value is neither an int nor a float; "
                             "v: %s type: %s" % (str(v[0]), str(type(v[0]))))
    return tf.train.Example(features=tf.train.Features(feature=features))
def read_records(filename):
    """Read every record out of a TFRecord file, logging every 10k."""
    records = []
    for record in tf.python_io.tf_record_iterator(filename):
        records.append(record)
        if len(records) % 10000 == 0:
            tf.logging.info("read: %d", len(records))
    return records
def write_records(records, out_filename):
    """Write serialized records to a TFRecord file, logging every 10k."""
    writer = tf.python_io.TFRecordWriter(out_filename)
    count = 0
    for record in records:
        writer.write(record)
        if count % 10000 == 0:
            tf.logging.info("write: %d", count)
        count += 1
    writer.close()
def convert_record_to_json(pattern, output_name, output_dir, num_shards=1):
    """Convert TFRecord files matching `pattern` to sharded JSON files."""
    output_files = []
    writers = []
    # Python 3 compatibility fix: range instead of the Python-2-only xrange.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.gfile.GFile(output_file, "w"))
    filenames = tf.gfile.Glob(pattern)
    records = []
    for filename in filenames:
        records.extend(read_records(filename))
    # Distribute records round-robin across the shard writers.
    counter, shard = 0, 0
    for record in records:
        counter += 1
        example = tf.train.Example()
        example.ParseFromString(record)
        features = to_dictionary(example)
        json_str = to_json(features)
        writers[shard].write(json_str + "\n")
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
# Input format: "pred-pos tokens ||| labels"
def convert_plain_to_json(name, vocabs, output_name, output_dir, num_shards,
                          lower=True, shuffle=True):
    """Convert plain SRL data to sharded JSON files.

    Note: despite the original docstring, this writes JSON lines, not
    TFRecords; see convert_plain_to_record for the TFRecord variant.
    """
    vocab_token = load_vocab(vocabs[0])
    vocab_label = load_vocab(vocabs[1])
    records = []
    unk = vocab_token["<unk>"]
    with open(name) as fd:
        for line in fd:
            features, labels = line.strip().split("|||")
            features = features.strip().split(" ")
            labels = labels.strip().split(" ")
            pred_pos = features[0]
            inputs = features[1:]
            if lower:
                inputs = [item.lower() for item in inputs]
            inputs = [vocab_token[item] if item in vocab_token else unk
                      for item in inputs]
            labels = [vocab_label[item] for item in labels]
            # One-hot vector marking the predicate position.
            preds = [0 for _ in inputs]
            preds[int(pred_pos)] = 1
            feature = {
                "inputs": inputs,
                "preds": preds,
                "targets": labels
            }
            records.append(feature)
    if shuffle:
        random.shuffle(records)
    writers = []
    output_files = []
    # Python 3 compatibility fix: range instead of the Python-2-only xrange.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.gfile.GFile(output_file, "w"))
    counter, shard = 0, 0
    for record in records:
        counter += 1
        features = record
        json_str = to_json(features)
        writers[shard].write(json_str + "\n")
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
# Input format: "pred-pos tokens ||| labels"
def convert_plain_to_record(name, vocabs, output_name, output_dir, num_shards,
                            lower=True, shuffle=True):
    """Convert plain SRL data to sharded TFRecord files."""
    vocab_token = load_vocab(vocabs[0])
    vocab_label = load_vocab(vocabs[1])
    records = []
    unk = vocab_token["<unk>"]
    with open(name) as fd:
        for line in fd:
            features, labels = line.strip().split("|||")
            features = features.strip().split()
            labels = labels.strip().split()
            pred_pos = features[0]
            inputs = features[1:]
            if lower:
                inputs = [item.lower() for item in inputs]
            inputs = [vocab_token[item] if item in vocab_token else unk
                      for item in inputs]
            labels = [vocab_label[item] for item in labels]
            # One-hot vector marking the predicate position.
            preds = [0 for _ in inputs]
            preds[int(pred_pos)] = 1
            feature = {
                "inputs": inputs,
                "preds": preds,
                "targets": labels
            }
            records.append(feature)
    if shuffle:
        random.shuffle(records)
    output_files = []
    writers = []
    # Python 3 compatibility fix: range instead of the Python-2-only xrange.
    for shard in range(num_shards):
        output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards)
        output_file = os.path.join(output_dir, output_filename)
        output_files.append(output_file)
        writers.append(tf.python_io.TFRecordWriter(output_file))
    counter, shard = 0, 0
    for record in records:
        counter += 1
        example = to_example(record)
        writers[shard].write(example.SerializeToString())
        shard = (shard + 1) % num_shards
    for writer in writers:
        writer.close()
def parse_args():
    """Define and parse the converter's command-line interface."""
    parser = argparse.ArgumentParser(
        description="convert srl data to TensorFlow record format",
        usage="srl_input_converter.py [<args>] [-h | --help]")
    parser.add_argument("--input_path", required=True, type=str,
                        help="path of source file")
    parser.add_argument("--output_name", required=True, type=str,
                        help="output name")
    parser.add_argument("--output_dir", required=True, type=str,
                        help="output directory")
    parser.add_argument("--vocab", type=str, nargs=2,
                        help="path of vocabulary")
    parser.add_argument("--num_shards", default=100, type=int,
                        help="number of output shards")
    parser.add_argument("--shuffle", action="store_true",
                        help="shuffle inputs")
    parser.add_argument("--lower", action="store_true",
                        help="use lowercase")
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: parse the CLI options and convert the plain-text
    # SRL corpus into sharded TFRecord files.
    args = parse_args()
    convert_plain_to_record(args.input_path, args.vocab, args.output_name,
                            args.output_dir, args.num_shards, args.lower,
                            args.shuffle)
| true | true |
f733795fb48cb31e6b901353458853317334e76e | 7,191 | py | Python | run.py | dmachlanski/ce807 | 17c9b7ddd71906c018cd213a674f37cbed36856d | [
"MIT"
] | null | null | null | run.py | dmachlanski/ce807 | 17c9b7ddd71906c018cd213a674f37cbed36856d | [
"MIT"
] | null | null | null | run.py | dmachlanski/ce807 | 17c9b7ddd71906c018cd213a674f37cbed36856d | [
"MIT"
] | 1 | 2020-04-20T19:46:17.000Z | 2020-04-20T19:46:17.000Z | import numpy as np
import pandas as pd
import re, argparse, datetime
from timeit import default_timer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.metrics import f1_score, make_scorer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
def get_parser():
    """ Builds the argument parser for the program. """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', dest='clf_key', type=str, default='dt',
        choices=['dt', 'xts', 'rf'], help='A classifier to use.')
    parser.add_argument(
        '-m', dest='mode', type=str, default='test', choices=['cv', 'test'],
        help='Mode to run the program in (cross-validation or test).')
    parser.add_argument(
        '-k', dest='cv', type=int, default=5,
        help='Number of folds in KFold cross-validation.')
    parser.add_argument(
        '-d', '--data', dest='data_name', type=str, default='econbiz',
        help='Name of the dataset to use (econbiz or pubmed).')
    parser.add_argument(
        '-f', dest='data_fraction', type=float, default=0.1,
        help='The fraction of the data to be used (0, 1>.')
    parser.add_argument(
        '-t', dest='test_size', type=float, default=0.1,
        help='Test size (0, 1>.')
    parser.add_argument(
        '--max_depth', dest='max_depth', type=int, default=None,
        help='The maximum depth of the tree.')
    parser.add_argument(
        '--min_ss', dest='min_ss', type=int, default=2,
        help='The minimum number of samples required to split an internal tree node.')
    parser.add_argument(
        '--max_features', dest='max_features', type=str, default=None,
        help='The number of features to consider when looking for the best split in the tree.')
    parser.add_argument(
        '-n', dest='n_estimators', type=int, default=10,
        help='The number of estimators in the ensemble.')
    parser.add_argument(
        '-j', dest='n_jobs', type=int, default=-1,
        help='The number of jobs to run in parallel.')
    parser.add_argument(
        '-v', dest='verbose', type=int, default=0,
        help='Verbosity of the program.')
    parser.add_argument(
        '-b', '--batch', dest='is_batch_mode', action='store_true',
        default=False,
        help='Whether the program runs in a batch mode (affects file locations).')
    return parser
def get_data(options):
    """ Loads and pre-processes the data. """
    if options.verbose > 0:
        print(f'Loading data [dataset: {options.data_name}, fraction: {options.data_fraction}, test size: {options.test_size}]')
    # Batch jobs run from a nested directory, so the CSV path differs.
    location_prefix = '../../' if options.is_batch_mode else ''
    data = pd.read_csv(f'{location_prefix}data/{options.data_name}.csv')
    titles = data['title'].values
    # Labels come tab-separated; turn each row into a list of labels.
    label_lists = [x.split('\t') for x in data['labels'].values]
    if options.data_fraction < 1.0:
        cutoff = int(options.data_fraction * titles.shape[0])
        X_raw, Y_raw = titles[:cutoff], label_lists[:cutoff]
    else:
        X_raw, Y_raw = titles, label_lists
    # Tokens must be at least two letters/underscores long.
    word_regexp = r"(?u)\b[a-zA-Z_][a-zA-Z_]+\b"
    # Unigram bag-of-words restricted to the 25k most frequent terms.
    terms = CountVectorizer(input='content', stop_words='english',
                            binary=False, token_pattern=word_regexp,
                            max_features=25000, ngram_range=(1, 1))
    X = terms.fit_transform(X_raw)
    # One-hot encode the label sets as a sparse indicator matrix.
    mlb = MultiLabelBinarizer(sparse_output=True)
    Y = mlb.fit_transform(Y_raw)
    return train_test_split(X, Y, test_size=options.test_size)
def get_model(options):
    """ Prepare a classifier for training. """
    # Tree-shape options shared by all three classifiers.
    tree_kwargs = dict(max_depth=options.max_depth,
                       min_samples_split=options.min_ss,
                       max_features=options.max_features)
    # Ensembles additionally take size and parallelism options.
    ensemble_kwargs = dict(n_estimators=options.n_estimators,
                           n_jobs=options.n_jobs, **tree_kwargs)
    classifiers = {
        "dt": DecisionTreeClassifier(**tree_kwargs),
        "xts": ExtraTreesClassifier(**ensemble_kwargs),
        "rf": RandomForestClassifier(**ensemble_kwargs),
    }
    # Pipeline: TF-IDF weighting followed by the selected classifier.
    trf = TfidfTransformer(sublinear_tf=False, use_idf=True, norm='l2')
    return Pipeline([("trf", trf), ("clf", classifiers[options.clf_key])])
if __name__ == "__main__":
    # Get and parse passed arguments.
    parser = get_parser()
    options = parser.parse_args()
    if options.verbose > 0:
        print('### Starting ###')
        print('Arguments:', options)
    X_train, X_test, Y_train, Y_test = get_data(options)
    clf = get_model(options)
    # The program can be run in either a 'cross-validation' or a 'test' mode.
    # The former performs k-fold cross-validation, while the latter fits the selected model
    # on the training data and runs predictions against the test set.
    # Both modes report samples-based F1-score, fitting time and prediction time (in seconds).
    if options.mode == 'cv':
        if options.verbose > 0:
            print(f'Running {options.cv}-fold cross-validation')
        # Dense conversion is required because the tree estimators used
        # here do not accept sparse multi-label targets.
        scores = cross_validate(clf, X_train.toarray(), Y_train.toarray(), cv=options.cv,
                                scoring=make_scorer(f1_score, average='samples'), n_jobs=options.n_jobs, verbose=options.verbose)
        test_score = scores['test_score']
        fit_time = scores['fit_time']
        score_time = scores['score_time']
        # Cross-validation reports mean +/- std over the folds.
        print("F1-score: %0.2f (+/- %0.2f)" % (test_score.mean(), test_score.std()))
        print("Fit time: %0.2f (+/- %0.2f)" % (fit_time.mean(), fit_time.std()))
        print("Prediction time: %0.2f (+/- %0.2f)" % (score_time.mean(), score_time.std()))
    else:
        if options.verbose > 0:
            print('Training the model')
        # Time the fit separately from prediction for reporting below.
        fit_time_start = default_timer()
        clf.fit(X_train.toarray(), Y_train.toarray())
        fit_time_end = default_timer()
        if options.verbose > 0:
            print('Running predictions')
        pred_time_start = default_timer()
        Y_pred = clf.predict(X_test.toarray())
        pred_time_end = default_timer()
        # Samples-averaged F1 suits the multi-label setting.
        test_score = f1_score(Y_test.toarray(), Y_pred, average='samples')
        print("F1-score: %0.2f" % (test_score))
        print("Fit time: %0.2f" % (fit_time_end - fit_time_start))
print("Prediction time: %0.2f" % (pred_time_end - pred_time_start)) | 52.489051 | 174 | 0.65721 | import numpy as np
import pandas as pd
import re, argparse, datetime
from timeit import default_timer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.metrics import f1_score, make_scorer
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
def get_parser():
    """Build the command-line argument parser for the program."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', dest='clf_key', type=str, default='dt',
        choices=['dt', 'xts', 'rf'], help='A classifier to use.')
    parser.add_argument(
        '-m', dest='mode', type=str, default='test', choices=['cv', 'test'],
        help='Mode to run the program in (cross-validation or test).')
    parser.add_argument(
        '-k', dest='cv', type=int, default=5,
        help='Number of folds in KFold cross-validation.')
    parser.add_argument(
        '-d', '--data', dest='data_name', type=str, default='econbiz',
        help='Name of the dataset to use (econbiz or pubmed).')
    parser.add_argument(
        '-f', dest='data_fraction', type=float, default=0.1,
        help='The fraction of the data to be used (0, 1>.')
    parser.add_argument(
        '-t', dest='test_size', type=float, default=0.1,
        help='Test size (0, 1>.')
    parser.add_argument(
        '--max_depth', dest='max_depth', type=int, default=None,
        help='The maximum depth of the tree.')
    parser.add_argument(
        '--min_ss', dest='min_ss', type=int, default=2,
        help='The minimum number of samples required to split an internal tree node.')
    parser.add_argument(
        '--max_features', dest='max_features', type=str, default=None,
        help='The number of features to consider when looking for the best split in the tree.')
    parser.add_argument(
        '-n', dest='n_estimators', type=int, default=10,
        help='The number of estimators in the ensemble.')
    parser.add_argument(
        '-j', dest='n_jobs', type=int, default=-1,
        help='The number of jobs to run in parallel.')
    parser.add_argument(
        '-v', dest='verbose', type=int, default=0,
        help='Verbosity of the program.')
    parser.add_argument(
        '-b', '--batch', dest='is_batch_mode', action='store_true',
        default=False,
        help='Whether the program runs in a batch mode (affects file locations).')
    return parser
def get_data(options):
    """Load the dataset CSV, vectorize titles and binarize label sets."""
    if options.verbose > 0:
        print(f'Loading data [dataset: {options.data_name}, fraction: {options.data_fraction}, test size: {options.test_size}]')
    # Batch jobs run from a nested directory, so the CSV path differs.
    location_prefix = '../../' if options.is_batch_mode else ''
    data = pd.read_csv(f'{location_prefix}data/{options.data_name}.csv')
    titles = data['title'].values
    # Labels come tab-separated; turn each row into a list of labels.
    label_lists = [x.split('\t') for x in data['labels'].values]
    if options.data_fraction < 1.0:
        cutoff = int(options.data_fraction * titles.shape[0])
        X_raw, Y_raw = titles[:cutoff], label_lists[:cutoff]
    else:
        X_raw, Y_raw = titles, label_lists
    # Tokens must be at least two letters/underscores long.
    word_regexp = r"(?u)\b[a-zA-Z_][a-zA-Z_]+\b"
    # Unigram bag-of-words restricted to the 25k most frequent terms.
    terms = CountVectorizer(input='content', stop_words='english',
                            binary=False, token_pattern=word_regexp,
                            max_features=25000, ngram_range=(1, 1))
    X = terms.fit_transform(X_raw)
    # One-hot encode the label sets as a sparse indicator matrix.
    mlb = MultiLabelBinarizer(sparse_output=True)
    Y = mlb.fit_transform(Y_raw)
    return train_test_split(X, Y, test_size=options.test_size)
def get_model(options):
    """Build the classification pipeline selected by ``options.clf_key``.

    The pipeline applies a tf-idf transform and then one of three
    tree-based classifiers: a single decision tree ("dt"), extremely
    randomized trees ("xts"), or a random forest ("rf").  An unknown key
    raises ``KeyError``.
    """
    # Hyper-parameters shared by every tree-based estimator.
    tree_kwargs = dict(max_depth=options.max_depth,
                       min_samples_split=options.min_ss,
                       max_features=options.max_features)
    # The ensembles additionally take estimator-count and parallelism knobs.
    ensemble_kwargs = dict(n_estimators=options.n_estimators,
                           n_jobs=options.n_jobs,
                           **tree_kwargs)
    classifiers = {
        "dt": DecisionTreeClassifier(**tree_kwargs),
        "xts": ExtraTreesClassifier(**ensemble_kwargs),
        "rf": RandomForestClassifier(**ensemble_kwargs),
    }
    trf = TfidfTransformer(sublinear_tf=False, use_idf=True, norm='l2')
    return Pipeline([("trf", trf), ("clf", classifiers[options.clf_key])])
# Entry point: parse CLI options, load data, then either cross-validate
# or do a single train/predict run and report timing plus F1 scores.
if __name__ == "__main__":
    parser = get_parser()
    options = parser.parse_args()
    if options.verbose > 0:
        print('### Starting ###')
        print('Arguments:', options)
    X_train, X_test, Y_train, Y_test = get_data(options)
    clf = get_model(options)
    if options.mode == 'cv':
        if options.verbose > 0:
            print(f'Running {options.cv}-fold cross-validation')
        # NOTE(review): .toarray() densifies the sparse matrices -- assumes
        # the (sub)sampled data fits in memory; confirm for large fractions.
        scores = cross_validate(clf, X_train.toarray(), Y_train.toarray(), cv=options.cv,
                                scoring=make_scorer(f1_score, average='samples'), n_jobs=options.n_jobs, verbose=options.verbose)
        test_score = scores['test_score']
        fit_time = scores['fit_time']
        score_time = scores['score_time']
        # Mean and standard deviation across the CV folds.
        print("F1-score: %0.2f (+/- %0.2f)" % (test_score.mean(), test_score.std()))
        print("Fit time: %0.2f (+/- %0.2f)" % (fit_time.mean(), fit_time.std()))
        print("Prediction time: %0.2f (+/- %0.2f)" % (score_time.mean(), score_time.std()))
    else:
        # Single fit/predict run with wall-clock timing.
        if options.verbose > 0:
            print('Training the model')
        fit_time_start = default_timer()
        clf.fit(X_train.toarray(), Y_train.toarray())
        fit_time_end = default_timer()
        if options.verbose > 0:
            print('Running predictions')
        pred_time_start = default_timer()
        Y_pred = clf.predict(X_test.toarray())
        pred_time_end = default_timer()
        # Sample-averaged F1, matching the CV scorer above.
        test_score = f1_score(Y_test.toarray(), Y_pred, average='samples')
        print("F1-score: %0.2f" % (test_score))
        print("Fit time: %0.2f" % (fit_time_end - fit_time_start))
        print("Prediction time: %0.2f" % (pred_time_end - pred_time_start))
f7337a3169791bc62866a541b40acf4d1fcd1fe5 | 9,279 | py | Python | tests/utils/test_shell_util.py | ddiss/WALinuxAgent | 9c9893ebdec8a43bb15d84f309ff5b564436c408 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_shell_util.py | ddiss/WALinuxAgent | 9c9893ebdec8a43bb15d84f309ff5b564436c408 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_shell_util.py | ddiss/WALinuxAgent | 9c9893ebdec8a43bb15d84f309ff5b564436c408 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import unittest
import azurelinuxagent.common.utils.shellutil as shellutil
from tests.tools import AgentTestCase, patch
class ShellQuoteTestCase(AgentTestCase):
    """Tests for shellutil.quote."""
    def test_shellquote(self):
        # (input, expected shell-quoted output) pairs.
        cases = [
            ("foo", "\'foo\'"),
            ("foo bar", "\'foo bar\'"),
            ("foo\'bar", "'foo'\\''bar'"),
        ]
        for raw, quoted in cases:
            self.assertEqual(quoted, shellutil.quote(raw))
class RunTestCase(AgentTestCase):
    """Tests for shellutil.run."""
    def test_it_should_return_the_exit_code_of_the_command(self):
        self.assertEqual(123, shellutil.run("exit 123"))

    def test_it_should_be_a_pass_thru_to_run_get_output(self):
        # run() should forward its command and keyword arguments verbatim.
        with patch.object(shellutil, "run_get_output", return_value=(0, "")) as run_get_output_mock:
            shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3])

        self.assertEqual(run_get_output_mock.call_count, 1)

        positional, keyword = run_get_output_mock.call_args
        self.assertEqual(positional[0], "echo hello word!")
        self.assertEqual(keyword["chk_err"], False)
        self.assertEqual(keyword["expected_errors"], [1, 2, 3])
class RunGetOutputTestCase(AgentTestCase):
    """Tests for shellutil.run_get_output: return values and logging levels."""
    def test_run_get_output(self):
        # Success: a (0, stdout) tuple.
        output = shellutil.run_get_output(u"ls /")
        self.assertNotEqual(None, output)
        self.assertEqual(0, output[0])

        # Failure and non-ASCII arguments: non-zero return code.
        err = shellutil.run_get_output(u"ls /not-exists")
        self.assertNotEqual(0, err[0])

        err = shellutil.run_get_output(u"ls 我")
        self.assertNotEqual(0, err[0])

    def test_it_should_log_the_command(self):
        # The command itself is logged at VERBOSE level before execution.
        command = "echo hello world!"
        with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
            shellutil.run_get_output(command)
        self.assertEqual(mock_logger.verbose.call_count, 1)
        args, kwargs = mock_logger.verbose.call_args  # pylint: disable=unused-variable
        command_in_message = args[1]
        self.assertEqual(command_in_message, command)

    def test_it_should_log_command_failures_as_errors(self):
        # A failing command with no expected_errors list goes to logger.error.
        return_code = 99
        command = "exit {0}".format(return_code)

        with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
            shellutil.run_get_output(command, log_cmd=False)

        self.assertEqual(mock_logger.error.call_count, 1)

        args, kwargs = mock_logger.error.call_args  # pylint: disable=unused-variable
        message = args[0]  # message is similar to "Command: [exit 99], return code: [99], result: []"
        self.assertIn("[{0}]".format(command), message)
        self.assertIn("[{0}]".format(return_code), message)

        # No other log level should have been used.
        self.assertEqual(mock_logger.verbose.call_count, 0)
        self.assertEqual(mock_logger.info.call_count, 0)
        self.assertEqual(mock_logger.warn.call_count, 0)

    def test_it_should_log_expected_errors_as_info(self):
        # A failing command whose exit code is in expected_errors is demoted
        # to logger.info.
        return_code = 99
        command = "exit {0}".format(return_code)

        with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
            shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code])

        self.assertEqual(mock_logger.info.call_count, 1)

        args, kwargs = mock_logger.info.call_args  # pylint: disable=unused-variable
        message = args[0]  # message is similar to "Command: [exit 99], return code: [99], result: []"
        self.assertIn("[{0}]".format(command), message)
        self.assertIn("[{0}]".format(return_code), message)

        self.assertEqual(mock_logger.verbose.call_count, 0)
        self.assertEqual(mock_logger.warn.call_count, 0)
        self.assertEqual(mock_logger.error.call_count, 0)

    def test_it_should_log_unexpected_errors_as_errors(self):
        # An exit code NOT in expected_errors is still logged as an error.
        return_code = 99
        command = "exit {0}".format(return_code)

        with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
            shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1])

        self.assertEqual(mock_logger.error.call_count, 1)

        args, kwargs = mock_logger.error.call_args  # pylint: disable=unused-variable
        message = args[0]  # message is similar to "Command: [exit 99], return code: [99], result: []"
        self.assertIn("[{0}]".format(command), message)
        self.assertIn("[{0}]".format(return_code), message)

        self.assertEqual(mock_logger.info.call_count, 0)
        self.assertEqual(mock_logger.verbose.call_count, 0)
        self.assertEqual(mock_logger.warn.call_count, 0)
class RunCommandTestCase(AgentTestCase):
    """Tests for shellutil.run_command: output, errors, logging and stdin."""
    def test_run_command_should_execute_the_command(self):
        command = ["echo", "-n", "A TEST STRING"]
        ret = shellutil.run_command(command)
        self.assertEqual(ret, "A TEST STRING")

    def test_run_command_should_raise_an_exception_when_the_command_fails(self):
        # The CommandError should carry return code, stdout and stderr.
        command = ["ls", "-d", "/etc", "nonexistent_file"]

        with self.assertRaises(shellutil.CommandError) as context_manager:
            shellutil.run_command(command)

        exception = context_manager.exception
        self.assertIn("'ls' failed: 2", str(exception))
        self.assertIn("No such file or directory", str(exception))
        self.assertEqual(exception.stdout, "/etc\n")
        self.assertIn("No such file or directory", exception.stderr)
        self.assertEqual(exception.returncode, 2)

    def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self):
        # A missing executable surfaces as an exception, not a CommandError.
        command = "nonexistent_command"

        with self.assertRaises(Exception) as context_manager:
            shellutil.run_command(command)

        exception = context_manager.exception
        self.assertIn("No such file or directory", str(exception))

    @patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True)
    def test_run_command_it_should_not_log_by_default(self, mock_logger):
        def assert_no_message_logged(command):
            # Helper: run the (failing) command, swallow the error, and
            # verify nothing was logged at any level.
            try:
                shellutil.run_command(command)
            except:  # pylint: disable=bare-except
                pass

            self.assertEqual(mock_logger.info.call_count, 0)
            self.assertEqual(mock_logger.verbose.call_count, 0)
            self.assertEqual(mock_logger.warn.call_count, 0)
            self.assertEqual(mock_logger.error.call_count, 0)

        assert_no_message_logged(["ls", "nonexistent_file"])
        assert_no_message_logged("nonexistent_command")

    def test_run_command_it_should_log_an_error_when_log_error_is_set(self):
        # With log_error=True a failing command logs command, return code,
        # stdout and stderr in a single logger.error call.
        command = ["ls", "-d", "/etc", "nonexistent_file"]
        with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
            try:
                shellutil.run_command(command, log_error=True)
            except:  # pylint: disable=bare-except
                pass

            self.assertEqual(mock_log_error.call_count, 1)

            args, kwargs = mock_log_error.call_args  # pylint: disable=unused-variable
            self.assertIn("ls -d /etc nonexistent_file", args, msg="The command was not logged")
            self.assertIn(2, args, msg="The command's return code was not logged")
            self.assertIn("/etc\n", args, msg="The command's stdout was not logged")
            self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged")

        # An unrunnable command is also logged when log_error=True.
        command = "nonexistent_command"
        with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
            try:
                shellutil.run_command(command, log_error=True)
            except:  # pylint: disable=bare-except
                pass

            self.assertEqual(mock_log_error.call_count, 1)

            args, kwargs = mock_log_error.call_args
            self.assertIn(command, args, msg="The command was not logged")
            self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged")

    def test_run_command_it_should_read_from_stdin_if_cmd_input_is_set(self):
        # cmd_input is piped to the child's stdin; cat echoes it back.
        import random
        command = ["cat"]
        random_hash = ''.join(random.choice('0123456789ABCDEF') for _ in range(16))
        try:
            output = shellutil.run_command(command, cmd_input=random_hash)
        except:  # pylint: disable=bare-except
            self.fail("No exception should've been thrown when trying to read from stdin in run_command")
        self.assertEqual(output, random_hash, "We're reading from stdin and printing it shell, output should match")
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
import unittest
import azurelinuxagent.common.utils.shellutil as shellutil
from tests.tools import AgentTestCase, patch
class ShellQuoteTestCase(AgentTestCase):
def test_shellquote(self):
self.assertEqual("\'foo\'", shellutil.quote("foo"))
self.assertEqual("\'foo bar\'", shellutil.quote("foo bar"))
self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar"))
class RunTestCase(AgentTestCase):
def test_it_should_return_the_exit_code_of_the_command(self):
exit_code = shellutil.run("exit 123")
self.assertEqual(123, exit_code)
def test_it_should_be_a_pass_thru_to_run_get_output(self):
with patch.object(shellutil, "run_get_output", return_value=(0, "")) as mock_run_get_output:
shellutil.run("echo hello word!", chk_err=False, expected_errors=[1, 2, 3])
self.assertEqual(mock_run_get_output.call_count, 1)
args, kwargs = mock_run_get_output.call_args
self.assertEqual(args[0], "echo hello word!")
self.assertEqual(kwargs["chk_err"], False)
self.assertEqual(kwargs["expected_errors"], [1, 2, 3])
class RunGetOutputTestCase(AgentTestCase):
def test_run_get_output(self):
output = shellutil.run_get_output(u"ls /")
self.assertNotEqual(None, output)
self.assertEqual(0, output[0])
err = shellutil.run_get_output(u"ls /not-exists")
self.assertNotEqual(0, err[0])
err = shellutil.run_get_output(u"ls 我")
self.assertNotEqual(0, err[0])
def test_it_should_log_the_command(self):
command = "echo hello world!"
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command)
self.assertEqual(mock_logger.verbose.call_count, 1)
args, kwargs = mock_logger.verbose.call_args
command_in_message = args[1]
self.assertEqual(command_in_message, command)
def test_it_should_log_command_failures_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False)
self.assertEqual(mock_logger.error.call_count, 1)
args, kwargs = mock_logger.error.call_args
message = args[0]
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.info.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
def test_it_should_log_expected_errors_as_info(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code])
self.assertEqual(mock_logger.info.call_count, 1)
args, kwargs = mock_logger.info.call_args
message = args[0]
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
self.assertEqual(mock_logger.error.call_count, 0)
def test_it_should_log_unexpected_errors_as_errors(self):
return_code = 99
command = "exit {0}".format(return_code)
with patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True) as mock_logger:
shellutil.run_get_output(command, log_cmd=False, expected_errors=[return_code + 1])
self.assertEqual(mock_logger.error.call_count, 1)
args, kwargs = mock_logger.error.call_args
message = args[0]
self.assertIn("[{0}]".format(command), message)
self.assertIn("[{0}]".format(return_code), message)
self.assertEqual(mock_logger.info.call_count, 0)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
class RunCommandTestCase(AgentTestCase):
def test_run_command_should_execute_the_command(self):
command = ["echo", "-n", "A TEST STRING"]
ret = shellutil.run_command(command)
self.assertEqual(ret, "A TEST STRING")
def test_run_command_should_raise_an_exception_when_the_command_fails(self):
command = ["ls", "-d", "/etc", "nonexistent_file"]
with self.assertRaises(shellutil.CommandError) as context_manager:
shellutil.run_command(command)
exception = context_manager.exception
self.assertIn("'ls' failed: 2", str(exception))
self.assertIn("No such file or directory", str(exception))
self.assertEqual(exception.stdout, "/etc\n")
self.assertIn("No such file or directory", exception.stderr)
self.assertEqual(exception.returncode, 2)
def test_run_command_should_raise_an_exception_when_it_cannot_execute_the_command(self):
command = "nonexistent_command"
with self.assertRaises(Exception) as context_manager:
shellutil.run_command(command)
exception = context_manager.exception
self.assertIn("No such file or directory", str(exception))
@patch("azurelinuxagent.common.utils.shellutil.logger", autospec=True)
def test_run_command_it_should_not_log_by_default(self, mock_logger):
def assert_no_message_logged(command):
try:
shellutil.run_command(command)
except:
pass
self.assertEqual(mock_logger.info.call_count, 0)
self.assertEqual(mock_logger.verbose.call_count, 0)
self.assertEqual(mock_logger.warn.call_count, 0)
self.assertEqual(mock_logger.error.call_count, 0)
assert_no_message_logged(["ls", "nonexistent_file"])
assert_no_message_logged("nonexistent_command")
def test_run_command_it_should_log_an_error_when_log_error_is_set(self):
command = ["ls", "-d", "/etc", "nonexistent_file"]
with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
try:
shellutil.run_command(command, log_error=True)
except:
pass
self.assertEqual(mock_log_error.call_count, 1)
args, kwargs = mock_log_error.call_args
self.assertIn("ls -d /etc nonexistent_file", args, msg="The command was not logged")
self.assertIn(2, args, msg="The command's return code was not logged")
self.assertIn("/etc\n", args, msg="The command's stdout was not logged")
self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged")
command = "nonexistent_command"
with patch("azurelinuxagent.common.utils.shellutil.logger.error") as mock_log_error:
try:
shellutil.run_command(command, log_error=True)
except: # pylint: disable=bare-except
pass
self.assertEqual(mock_log_error.call_count, 1)
args, kwargs = mock_log_error.call_args
self.assertIn(command, args, msg="The command was not logged")
self.assertTrue(any("No such file or directory" in str(a) for a in args), msg="The command's stderr was not logged")
def test_run_command_it_should_read_from_stdin_if_cmd_input_is_set(self):
import random
command = ["cat"]
random_hash = ''.join(random.choice('0123456789ABCDEF') for _ in range(16))
try:
output = shellutil.run_command(command, cmd_input=random_hash)
except:
self.fail("No exception should've been thrown when trying to read from stdin in run_command")
self.assertEqual(output, random_hash, "We're reading from stdin and printing it shell, output should match")
if __name__ == '__main__':
unittest.main()
| true | true |
f7337ac96da895a25491e4e7acdfb8a6693363e9 | 34,887 | py | Python | python/taichi/lang/kernel_impl.py | josephgalestian/taichiV2-master | 12a63a05fdccc824205b1ee6545e4706bf473405 | [
"MIT"
] | null | null | null | python/taichi/lang/kernel_impl.py | josephgalestian/taichiV2-master | 12a63a05fdccc824205b1ee6545e4706bf473405 | [
"MIT"
] | null | null | null | python/taichi/lang/kernel_impl.py | josephgalestian/taichiV2-master | 12a63a05fdccc824205b1ee6545e4706bf473405 | [
"MIT"
] | null | null | null | import ast
import functools
import inspect
import re
import sys
import textwrap
import numpy as np
import taichi.lang
from taichi._lib import core as _ti_core
from taichi.lang import impl, runtime_ops
from taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,
transform_tree)
from taichi.lang.enums import Layout
from taichi.lang.exception import (TaichiCompilationError,
TaichiRuntimeTypeError, TaichiSyntaxError)
from taichi.lang.expr import Expr
from taichi.lang.matrix import MatrixType
from taichi.lang.shell import _shell_pop_print, oinspect
from taichi.lang.util import has_pytorch, to_taichi_type
from taichi.linalg.sparse_matrix import sparse_matrix_builder
from taichi.types import any_arr, primitive_types, template
from taichi import _logging
if has_pytorch():
import torch
def func(fn):
    """Marks a function as callable in Taichi-scope.

    This decorator transforms a Python function into a Taichi one; Taichi
    will JIT compile it into native instructions.

    Args:
        fn (Callable): The Python function to be decorated

    Returns:
        Callable: The decorated function

    Example::

        >>> @ti.func
        >>> def foo(x):
        >>>     return x + 2
        >>>
        >>> @ti.kernel
        >>> def run():
        >>>     print(foo(40))  # 42
    """
    # Detect whether we are decorating a method defined inside a class body.
    in_class = _inside_class(level_of_class_stackframe=3)
    taichi_func = Func(fn, _classfunc=in_class)

    @functools.wraps(fn)
    def wrapper(*args):
        return taichi_func.__call__(*args)

    # Marker used elsewhere to recognize Taichi functions.
    wrapper._is_taichi_function = True
    return wrapper
def pyfunc(fn):
    """Marks a function as callable in both Taichi and Python scopes.

    Inside the Taichi scope, Taichi JIT compiles it into native
    instructions; otherwise it is invoked directly as a plain Python
    function.

    See also :func:`~taichi.lang.kernel_impl.func`.

    Args:
        fn (Callable): The Python function to be decorated

    Returns:
        Callable: The decorated function
    """
    # Detect whether we are decorating a method defined inside a class body.
    in_class = _inside_class(level_of_class_stackframe=3)
    dual_scope_func = Func(fn, _classfunc=in_class, _pyfunc=True)

    @functools.wraps(fn)
    def wrapper(*args):
        return dual_scope_func.__call__(*args)

    # Marker used elsewhere to recognize Taichi functions.
    wrapper._is_taichi_function = True
    return wrapper
def _get_tree_and_ctx(self,
                      excluded_parameters=(),
                      is_kernel=True,
                      arg_features=None,
                      args=None,
                      ast_builder=None):
    # Parse the wrapped function's source into an AST and build the
    # ASTTransformerContext that transform_tree() will consume.
    # `self` is a Func or Kernel instance, not a bound method receiver.
    file = oinspect.getsourcefile(self.func)
    src, start_lineno = oinspect.getsourcelines(self.func)
    # textwrap.fill expands tabs (tabsize=4); width=9999 effectively
    # disables re-wrapping of the line.
    src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]
    tree = ast.parse(textwrap.dedent("\n".join(src)))

    func_body = tree.body[0]
    # Drop decorators (e.g. @ti.kernel) so they are not applied again
    # when the transformed function is processed.
    func_body.decorator_list = []

    global_vars = _get_global_vars(self.func)

    # Map each annotation name in the signature to the resolved annotation
    # object so name lookups during transformation see the real type.
    for i, arg in enumerate(func_body.args.args):
        anno = arg.annotation
        if isinstance(anno, ast.Name):
            global_vars[anno.id] = self.argument_annotations[i]

    if isinstance(func_body.returns, ast.Name):
        global_vars[func_body.returns.id] = self.return_type

    if is_kernel or impl.get_runtime().experimental_real_function:
        # inject template parameters into globals
        for i in self.template_slot_locations:
            template_var_name = self.argument_names[i]
            global_vars[template_var_name] = args[i]

    return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,
                                       is_kernel=is_kernel,
                                       func=self,
                                       arg_features=arg_features,
                                       global_vars=global_vars,
                                       argument_data=args,
                                       src=src,
                                       start_lineno=start_lineno,
                                       file=file,
                                       ast_builder=ast_builder)
class Func:
    """Wrapper for a function decorated with @ti.func / @ti.pyfunc.

    Extracts and validates the signature at construction time and, when
    called from Taichi-scope, transforms the function's AST (or, in the
    experimental real-function mode, compiles one Function instance per
    template instantiation).
    """
    function_counter = 0  # class-wide counter used to assign unique func ids

    def __init__(self, _func, _classfunc=False, _pyfunc=False):
        self.func = _func
        self.func_id = Func.function_counter
        Func.function_counter += 1
        self.compiled = None
        self.classfunc = _classfunc
        self.pyfunc = _pyfunc
        self.argument_annotations = []
        self.argument_names = []
        self.return_type = None
        self.extract_arguments()
        # Indices of parameters annotated as ti.template().
        self.template_slot_locations = []
        for i, anno in enumerate(self.argument_annotations):
            if isinstance(anno, template):
                self.template_slot_locations.append(i)
        self.mapper = TaichiCallableTemplateMapper(
            self.argument_annotations, self.template_slot_locations)
        self.taichi_functions = {}  # The |Function| class in C++

    def __call__(self, *args):
        # Python-scope calls are only legal for @ti.pyfunc.
        if not impl.inside_kernel():
            if not self.pyfunc:
                raise TaichiSyntaxError(
                    "Taichi functions cannot be called from Python-scope."
                    " Use @ti.pyfunc if you wish to call Taichi functions "
                    "from both Python-scope and Taichi-scope.")
            return self.func(*args)

        if impl.get_runtime().experimental_real_function:
            if impl.get_runtime().current_kernel.is_grad:
                raise TaichiSyntaxError(
                    "Real function in gradient kernels unsupported.")
            # One compiled Function per template instantiation.
            instance_id, _ = self.mapper.lookup(args)
            key = _ti_core.FunctionKey(self.func.__name__, self.func_id,
                                       instance_id)
            if self.compiled is None:
                self.compiled = {}
            if key.instance_id not in self.compiled:
                self.do_compile(key=key, args=args)
            return self.func_call_rvalue(key=key, args=args)
        # Default path: inline the function body into the calling kernel
        # by transforming its AST with the caller's AST builder.
        tree, ctx = _get_tree_and_ctx(
            self,
            is_kernel=False,
            args=args,
            ast_builder=impl.get_runtime().prog.current_ast_builder())
        ret = transform_tree(tree, ctx)
        if not impl.get_runtime().experimental_real_function:
            if self.return_type and not ctx.returned:
                raise TaichiSyntaxError(
                    "Function has a return type but does not have a return statement"
                )
        return ret

    def func_call_rvalue(self, key, args):
        # Skip the template args, e.g., |self|
        assert impl.get_runtime().experimental_real_function
        non_template_args = []
        for i, anno in enumerate(self.argument_annotations):
            if not isinstance(anno, template):
                non_template_args.append(args[i])
        non_template_args = impl.make_expr_group(non_template_args)
        # Build the call expression against the pre-compiled Function.
        return Expr(
            _ti_core.make_func_call_expr(
                self.taichi_functions[key.instance_id], non_template_args))

    def do_compile(self, key, args):
        """Compile one Function instance for the given template key."""
        tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
        fn = impl.get_runtime().prog.create_function(key)

        def func_body():
            ctx.ast_builder = fn.ast_builder()
            transform_tree(tree, ctx)

        self.taichi_functions[key.instance_id] = fn
        self.compiled[key.instance_id] = func_body
        self.taichi_functions[key.instance_id].set_function_body(func_body)

    def extract_arguments(self):
        """Validate the signature and record annotations/names per parameter."""
        sig = inspect.signature(self.func)
        if sig.return_annotation not in (inspect._empty, None):
            self.return_type = sig.return_annotation
        params = sig.parameters
        arg_names = params.keys()
        for i, arg_name in enumerate(arg_names):
            param = params[arg_name]
            if param.kind == inspect.Parameter.VAR_KEYWORD:
                raise TaichiSyntaxError(
                    'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'
                )
            if param.kind == inspect.Parameter.VAR_POSITIONAL:
                raise TaichiSyntaxError(
                    'Taichi functions do not support variable positional parameters (i.e., *args)'
                )
            if param.kind == inspect.Parameter.KEYWORD_ONLY:
                raise TaichiSyntaxError(
                    'Taichi functions do not support keyword parameters')
            if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
                raise TaichiSyntaxError(
                    'Taichi functions only support "positional or keyword" parameters'
                )
            annotation = param.annotation
            if annotation is inspect.Parameter.empty:
                # An un-annotated leading parameter of a class function is
                # implicitly |self|, treated as a template argument.
                if i == 0 and self.classfunc:
                    annotation = template()
                # TODO: pyfunc also need type annotation check when real function is enabled,
                # but that has to happen at runtime when we know which scope it's called from.
                elif not self.pyfunc and impl.get_runtime(
                ).experimental_real_function:
                    raise TaichiSyntaxError(
                        f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'
                    )
            else:
                if not id(annotation
                          ) in primitive_types.type_ids and not isinstance(
                              annotation, template):
                    raise TaichiSyntaxError(
                        f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'
                    )
            self.argument_annotations.append(annotation)
            self.argument_names.append(param.name)
class TaichiCallableTemplateMapper:
    """Maps concrete call arguments to a template-instantiation id.

    Arguments annotated as template/any_arr are reduced to a hashable key
    (see extract_arg); each distinct key gets a sequential instance id so
    that a kernel/function is compiled once per instantiation.
    """
    def __init__(self, annotations, template_slot_locations):
        self.annotations = annotations
        self.num_args = len(annotations)
        self.template_slot_locations = template_slot_locations
        # key tuple -> instance id, filled lazily by lookup().
        self.mapping = {}

    @staticmethod
    def extract_arg(arg, anno):
        # Reduce one argument to a hashable value that identifies its
        # template instantiation; non-template args do not participate.
        if isinstance(anno, template):
            if isinstance(arg, taichi.lang.snode.SNode):
                return arg.ptr
            if isinstance(arg, taichi.lang.expr.Expr):
                return arg.ptr.get_underlying_ptr_address()
            if isinstance(arg, _ti_core.Expr):
                return arg.get_underlying_ptr_address()
            if isinstance(arg, tuple):
                return tuple(
                    TaichiCallableTemplateMapper.extract_arg(item, anno)
                    for item in arg)
            return arg
        if isinstance(anno, any_arr):
            # Taichi ndarrays: validate against the annotation's declared
            # element dim/shape/layout, then key on (dtype, dim, shape, layout).
            if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):
                anno.check_element_dim(arg, 0)
                anno.check_element_shape(())
                anno.check_field_dim(len(arg.shape))
                return arg.dtype, len(arg.shape), (), Layout.AOS
            if isinstance(arg, taichi.lang.matrix.VectorNdarray):
                anno.check_element_dim(arg, 1)
                anno.check_element_shape((arg.n, ))
                anno.check_field_dim(len(arg.shape))
                anno.check_layout(arg)
                return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout
            if isinstance(arg, taichi.lang.matrix.MatrixNdarray):
                anno.check_element_dim(arg, 2)
                anno.check_element_shape((arg.n, arg.m))
                anno.check_field_dim(len(arg.shape))
                anno.check_layout(arg)
                return arg.dtype, len(arg.shape) + 2, (arg.n,
                                                       arg.m), arg.layout
            # external arrays
            element_dim = 0 if anno.element_dim is None else anno.element_dim
            layout = Layout.AOS if anno.layout is None else anno.layout
            shape = tuple(arg.shape)
            if len(shape) < element_dim:
                raise ValueError(
                    f"Invalid argument into ti.any_arr() - required element_dim={element_dim}, "
                    f"but the argument has only {len(shape)} dimensions")
            # SOA puts the element dims first; AOS puts them last.
            element_shape = () if element_dim == 0 else shape[:element_dim] if layout == Layout.SOA else shape[-element_dim:]
            return to_taichi_type(arg.dtype), len(shape), element_shape, layout
        # Use '#' as a placeholder because other kinds of arguments are not involved in template instantiation
        return '#'

    def extract(self, args):
        # Build the full instantiation key, one entry per argument.
        extracted = []
        for arg, anno in zip(args, self.annotations):
            extracted.append(self.extract_arg(arg, anno))
        return tuple(extracted)

    def lookup(self, args):
        """Return (instance_id, key) for the given concrete arguments."""
        if len(args) != self.num_args:
            raise TypeError(
                f'{self.num_args} argument(s) needed but {len(args)} provided.'
            )

        key = self.extract(args)
        if key not in self.mapping:
            count = len(self.mapping)
            self.mapping[key] = count
        return self.mapping[key], key
def _get_global_vars(_func):
# Discussions: https://github.com/taichi-dev/taichi/issues/282
global_vars = _func.__globals__.copy()
freevar_names = _func.__code__.co_freevars
closure = _func.__closure__
if closure:
freevar_values = list(map(lambda x: x.cell_contents, closure))
for name, value in zip(freevar_names, freevar_values):
global_vars[name] = value
return global_vars
class Kernel:
counter = 0
    def __init__(self, _func, is_grad, _classkernel=False):
        """Wrap ``_func`` as a Taichi kernel (or its gradient if is_grad)."""
        self.func = _func
        self.kernel_counter = Kernel.counter
        Kernel.counter += 1
        self.is_grad = is_grad
        self.grad = None
        self.argument_annotations = []
        self.argument_names = []
        self.return_type = None
        self.classkernel = _classkernel
        self.extract_arguments()
        # Indices of parameters annotated as ti.template().
        self.template_slot_locations = []
        for i, anno in enumerate(self.argument_annotations):
            if isinstance(anno, template):
                self.template_slot_locations.append(i)
        self.mapper = TaichiCallableTemplateMapper(
            self.argument_annotations, self.template_slot_locations)
        # Register with the runtime so the kernel can be reset/recompiled.
        impl.get_runtime().kernels.append(self)
        self.reset()
        self.kernel_cpp = None
def reset(self):
self.runtime = impl.get_runtime()
if self.is_grad:
self.compiled_functions = self.runtime.compiled_grad_functions
else:
self.compiled_functions = self.runtime.compiled_functions
    def extract_arguments(self):
        """Validate the kernel signature and record annotations/names.

        Raises:
            TaichiSyntaxError: for *args/**kwargs, keyword-only params,
                default values, missing annotations, or unsupported
                annotation types.
        """
        sig = inspect.signature(self.func)
        if sig.return_annotation not in (inspect._empty, None):
            self.return_type = sig.return_annotation
        params = sig.parameters
        arg_names = params.keys()
        for i, arg_name in enumerate(arg_names):
            param = params[arg_name]
            if param.kind == inspect.Parameter.VAR_KEYWORD:
                raise TaichiSyntaxError(
                    'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'
                )
            if param.kind == inspect.Parameter.VAR_POSITIONAL:
                raise TaichiSyntaxError(
                    'Taichi kernels do not support variable positional parameters (i.e., *args)'
                )
            if param.default is not inspect.Parameter.empty:
                raise TaichiSyntaxError(
                    'Taichi kernels do not support default values for arguments'
                )
            if param.kind == inspect.Parameter.KEYWORD_ONLY:
                raise TaichiSyntaxError(
                    'Taichi kernels do not support keyword parameters')
            if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
                raise TaichiSyntaxError(
                    'Taichi kernels only support "positional or keyword" parameters'
                )
            annotation = param.annotation
            if param.annotation is inspect.Parameter.empty:
                if i == 0 and self.classkernel:  # The |self| parameter
                    annotation = template()
                else:
                    raise TaichiSyntaxError(
                        'Taichi kernels parameters must be type annotated')
            else:
                # Accepted annotation kinds: template/any_arr, primitive
                # types, sparse-matrix builders, and matrix types.
                if isinstance(annotation, (template, any_arr)):
                    pass
                elif id(annotation) in primitive_types.type_ids:
                    pass
                elif isinstance(annotation, sparse_matrix_builder):
                    pass
                elif isinstance(annotation, MatrixType):
                    pass
                else:
                    raise TaichiSyntaxError(
                        f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'
                    )
            self.argument_annotations.append(annotation)
            self.argument_names.append(param.name)
    def materialize(self, key=None, args=None, arg_features=None):
        """Compile the kernel for the given instantiation key (idempotent).

        On success, self.compiled_functions[key] holds the callable
        produced by get_function_body().
        """
        if key is None:
            key = (self.func, 0)
        self.runtime.materialize()
        # Already compiled for this instantiation: nothing to do.
        if key in self.compiled_functions:
            return
        grad_suffix = ""
        if self.is_grad:
            grad_suffix = "_grad"
        kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}"
        _logging.trace(f"Compiling kernel {kernel_name}...")

        tree, ctx = _get_tree_and_ctx(
            self,
            args=args,
            excluded_parameters=self.template_slot_locations,
            arg_features=arg_features)

        if self.is_grad:
            KernelSimplicityASTChecker(self.func).visit(tree)

        # Do not change the name of 'taichi_ast_generator'
        # The warning system needs this identifier to remove unnecessary messages
        def taichi_ast_generator(kernel_cxx):
            if self.runtime.inside_kernel:
                raise TaichiSyntaxError(
                    "Kernels cannot call other kernels. I.e., nested kernels are not allowed. "
                    "Please check if you have direct/indirect invocation of kernels within kernels. "
                    "Note that some methods provided by the Taichi standard library may invoke kernels, "
                    "and please move their invocations to Python-scope.")
            self.runtime.inside_kernel = True
            self.runtime.current_kernel = self
            # finally guarantees the inside_kernel flag is cleared even when
            # the AST transformation raises.
            try:
                ctx.ast_builder = kernel_cxx.ast_builder()
                transform_tree(tree, ctx)
                if not impl.get_runtime().experimental_real_function:
                    if self.return_type and not ctx.returned:
                        raise TaichiSyntaxError(
                            "Kernel has a return type but does not have a return statement"
                        )
            finally:
                self.runtime.inside_kernel = False
                self.runtime.current_kernel = None

        taichi_kernel = impl.get_runtime().prog.create_kernel(
            taichi_ast_generator, kernel_name, self.is_grad)

        self.kernel_cpp = taichi_kernel

        assert key not in self.compiled_functions
        self.compiled_functions[key] = self.get_function_body(taichi_kernel)
def get_torch_callbacks(self, v, has_torch, is_ndarray=True):
callbacks = []
def get_call_back(u, v):
def call_back():
u.copy_(v)
return call_back
assert has_torch
assert isinstance(v, torch.Tensor)
if v._is_view():
raise ValueError(
"Torch view tensors are not supported, please call tensor.clone() before passing it into taichi kernel."
)
tmp = v
taichi_arch = self.runtime.prog.config.arch
# Ndarray means its memory is allocated on the specified taichi arch.
# Since torch only supports CPU & CUDA, torch-base ndarray only supports
# taichi cpu/cuda backend as well.
# Note I put x64/arm64/cuda here to be more specific.
assert not is_ndarray or taichi_arch in (
_ti_core.Arch.cuda, _ti_core.Arch.x64, _ti_core.Arch.arm64
), "Torch-based ndarray is only supported on taichi x64/arm64/cuda backend."
if str(v.device).startswith('cuda'):
# External tensor on cuda
if taichi_arch != _ti_core.Arch.cuda:
# copy data back to cpu
host_v = v.to(device='cpu', copy=True)
tmp = host_v
callbacks.append(get_call_back(v, host_v))
else:
# External tensor on cpu
if taichi_arch == _ti_core.Arch.cuda:
gpu_v = v.cuda()
tmp = gpu_v
callbacks.append(get_call_back(v, gpu_v))
return tmp, callbacks
def get_function_body(self, t_kernel):
# The actual function body
def func__(*args):
assert len(args) == len(
self.argument_annotations
), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'
tmps = []
callbacks = []
has_external_arrays = False
has_torch = has_pytorch()
ndarray_use_torch = impl.get_runtime().ndarray_use_torch
actual_argument_slot = 0
launch_ctx = t_kernel.make_launch_context()
for i, v in enumerate(args):
needed = self.argument_annotations[i]
if isinstance(needed, template):
continue
provided = type(v)
# Note: do not use sth like "needed == f32". That would be slow.
if id(needed) in primitive_types.real_type_ids:
if not isinstance(v, (float, int)):
raise TaichiRuntimeTypeError(i, needed.to_string(),
provided)
launch_ctx.set_arg_float(actual_argument_slot, float(v))
elif id(needed) in primitive_types.integer_type_ids:
if not isinstance(v, int):
raise TaichiRuntimeTypeError(i, needed.to_string(),
provided)
launch_ctx.set_arg_int(actual_argument_slot, int(v))
elif isinstance(needed, sparse_matrix_builder):
# Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument
launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
elif isinstance(needed, any_arr) and isinstance(
v, taichi.lang._ndarray.Ndarray):
has_external_arrays = True
v = v.arr
if ndarray_use_torch:
is_ndarray = True
tmp, torch_callbacks = self.get_torch_callbacks(
v, has_torch, is_ndarray)
callbacks += torch_callbacks
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), v.shape)
else:
launch_ctx.set_arg_ndarray(actual_argument_slot, v)
elif isinstance(needed, any_arr) and (self.match_ext_arr(v)):
has_external_arrays = True
is_numpy = isinstance(v, np.ndarray)
if is_numpy:
tmp = np.ascontiguousarray(v)
# Purpose: DO NOT GC |tmp|!
tmps.append(tmp)
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.ctypes.data),
tmp.nbytes, v.shape)
else:
is_ndarray = False
tmp, torch_callbacks = self.get_torch_callbacks(
v, has_torch, is_ndarray)
callbacks += torch_callbacks
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), v.shape)
elif isinstance(needed, MatrixType):
if id(needed.dtype) in primitive_types.real_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], (int, float)):
raise TaichiRuntimeTypeError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_float(
actual_argument_slot, float(v[a, b]))
actual_argument_slot += 1
elif id(needed.dtype) in primitive_types.integer_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], int):
raise TaichiRuntimeTypeError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_int(actual_argument_slot,
int(v[a, b]))
actual_argument_slot += 1
else:
raise ValueError(
f'Matrix dtype {needed.dtype} is not integer type or real type.'
)
continue
else:
raise ValueError(
f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
)
actual_argument_slot += 1
# Both the class kernels and the plain-function kernels are unified now.
# In both cases, |self.grad| is another Kernel instance that computes the
# gradient. For class kernels, args[0] is always the kernel owner.
if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
self.runtime.target_tape.insert(self, args)
t_kernel(launch_ctx)
ret = None
ret_dt = self.return_type
has_ret = ret_dt is not None
if has_ret or (impl.current_cfg().async_mode
and has_external_arrays):
runtime_ops.sync()
if has_ret:
if id(ret_dt) in primitive_types.integer_type_ids:
ret = t_kernel.get_ret_int(0)
else:
ret = t_kernel.get_ret_float(0)
if callbacks:
for c in callbacks:
c()
return ret
return func__
@staticmethod
def match_ext_arr(v):
has_array = isinstance(v, np.ndarray)
if not has_array and has_pytorch():
has_array = isinstance(v, torch.Tensor)
return has_array
def ensure_compiled(self, *args):
instance_id, arg_features = self.mapper.lookup(args)
key = (self.func, instance_id)
self.materialize(key=key, args=args, arg_features=arg_features)
return key
# For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__
# Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)
@_shell_pop_print
def __call__(self, *args, **kwargs):
if self.is_grad and impl.current_cfg().opt_level == 0:
_logging.warn(
"""opt_level = 1 is enforced to enable gradient computation."""
)
impl.current_cfg().opt_level = 1
assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
key = self.ensure_compiled(*args)
return self.compiled_functions[key](*args)
# For a Taichi class definition like below:
#
# @ti.data_oriented
# class X:
# @ti.kernel
# def foo(self):
# ...
#
# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is
# different from that of Python 3.7 and below. In 3.8+, it is 'class X:',
# whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class
# inherits, i.e. class X(object):, then in both versions, |code_context| is
# 'class X(object):'...
_KERNEL_CLASS_STACKFRAME_STMT_RES = [
re.compile(r'@(\w+\.)?data_oriented'),
re.compile(r'class '),
]
def _inside_class(level_of_class_stackframe):
try:
maybe_class_frame = sys._getframe(level_of_class_stackframe)
statement_list = inspect.getframeinfo(maybe_class_frame)[3]
first_statment = statement_list[0].strip()
for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:
if pat.match(first_statment):
return True
except:
pass
return False
def _kernel_impl(_func, level_of_class_stackframe, verbose=False):
# Can decorators determine if a function is being defined inside a class?
# https://stackoverflow.com/a/8793684/12003165
is_classkernel = _inside_class(level_of_class_stackframe + 1)
if verbose:
print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')
primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)
adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)
# Having |primal| contains |grad| makes the tape work.
primal.grad = adjoint
if is_classkernel:
# For class kernels, their primal/adjoint callables are constructed
# when the kernel is accessed via the instance inside
# _BoundedDifferentiableMethod.
# This is because we need to bind the kernel or |grad| to the instance
# owning the kernel, which is not known until the kernel is accessed.
#
# See also: _BoundedDifferentiableMethod, data_oriented.
@functools.wraps(_func)
def wrapped(*args, **kwargs):
# If we reach here (we should never), it means the class is not decorated
# with @ti.data_oriented, otherwise getattr would have intercepted the call.
clsobj = type(args[0])
assert not hasattr(clsobj, '_data_oriented')
raise TaichiSyntaxError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
else:
@functools.wraps(_func)
def wrapped(*args, **kwargs):
try:
return primal(*args, **kwargs)
except TaichiCompilationError as e:
raise type(e)('\n' + str(e)) from None
wrapped.grad = adjoint
wrapped._is_wrapped_kernel = True
wrapped._is_classkernel = is_classkernel
wrapped._primal = primal
wrapped._adjoint = adjoint
return wrapped
def kernel(fn):
"""Marks a function as a Taichi kernel.
A Taichi kernel is a function written in Python, and gets JIT compiled by
Taichi into native CPU/GPU instructions (e.g. a series of CUDA kernels).
The top-level ``for`` loops are automatically parallelized, and distributed
to either a CPU thread pool or massively parallel GPUs.
Kernel's gradient kernel would be generated automatically by the AutoDiff system.
See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels.
Args:
fn (Callable): the Python function to be decorated
Returns:
Callable: The decorated function
Example::
>>> x = ti.field(ti.i32, shape=(4, 8))
>>>
>>> @ti.kernel
>>> def run():
>>> # Assigns all the elements of `x` in parallel.
>>> for i in x:
>>> x[i] = i
"""
return _kernel_impl(fn, level_of_class_stackframe=3)
class _BoundedDifferentiableMethod:
def __init__(self, kernel_owner, wrapped_kernel_func):
clsobj = type(kernel_owner)
if not getattr(clsobj, '_data_oriented', False):
raise TaichiSyntaxError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
self._kernel_owner = kernel_owner
self._primal = wrapped_kernel_func._primal
self._adjoint = wrapped_kernel_func._adjoint
self._is_staticmethod = wrapped_kernel_func._is_staticmethod
self.__name__ = None
def __call__(self, *args, **kwargs):
if self._is_staticmethod:
return self._primal(*args, **kwargs)
return self._primal(self._kernel_owner, *args, **kwargs)
def grad(self, *args, **kwargs):
return self._adjoint(self._kernel_owner, *args, **kwargs)
def data_oriented(cls):
"""Marks a class as Taichi compatible.
To allow for modularized code, Taichi provides this decorator so that
Taichi kernels can be defined inside a class.
See also https://docs.taichi.graphics/lang/articles/advanced/odop
Example::
>>> @ti.data_oriented
>>> class TiArray:
>>> def __init__(self, n):
>>> self.x = ti.field(ti.f32, shape=n)
>>>
>>> @ti.kernel
>>> def inc(self):
>>> for i in self.x:
>>> self.x[i] += 1.0
>>>
>>> a = TiArray(32)
>>> a.inc()
Args:
cls (Class): the class to be decorated
Returns:
The decorated class.
"""
def _getattr(self, item):
method = cls.__dict__.get(item, None)
is_property = method.__class__ == property
is_staticmethod = method.__class__ == staticmethod
if is_property:
x = method.fget
else:
x = super(cls, self).__getattribute__(item)
if hasattr(x, '_is_wrapped_kernel'):
if inspect.ismethod(x):
wrapped = x.__func__
else:
wrapped = x
wrapped._is_staticmethod = is_staticmethod
assert inspect.isfunction(wrapped)
if wrapped._is_classkernel:
ret = _BoundedDifferentiableMethod(self, wrapped)
ret.__name__ = wrapped.__name__
if is_property:
return ret()
return ret
if is_property:
return x(self)
return x
cls.__getattribute__ = _getattr
cls._data_oriented = True
return cls
__all__ = ["data_oriented", "func", "kernel"]
| 40.238754 | 120 | 0.578468 | import ast
import functools
import inspect
import re
import sys
import textwrap
import numpy as np
import taichi.lang
from taichi._lib import core as _ti_core
from taichi.lang import impl, runtime_ops
from taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker,
transform_tree)
from taichi.lang.enums import Layout
from taichi.lang.exception import (TaichiCompilationError,
TaichiRuntimeTypeError, TaichiSyntaxError)
from taichi.lang.expr import Expr
from taichi.lang.matrix import MatrixType
from taichi.lang.shell import _shell_pop_print, oinspect
from taichi.lang.util import has_pytorch, to_taichi_type
from taichi.linalg.sparse_matrix import sparse_matrix_builder
from taichi.types import any_arr, primitive_types, template
from taichi import _logging
if has_pytorch():
import torch
def func(fn):
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
def pyfunc(fn):
is_classfunc = _inside_class(level_of_class_stackframe=3)
fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True)
@functools.wraps(fn)
def decorated(*args):
return fun.__call__(*args)
decorated._is_taichi_function = True
return decorated
def _get_tree_and_ctx(self,
excluded_parameters=(),
is_kernel=True,
arg_features=None,
args=None,
ast_builder=None):
file = oinspect.getsourcefile(self.func)
src, start_lineno = oinspect.getsourcelines(self.func)
src = [textwrap.fill(line, tabsize=4, width=9999) for line in src]
tree = ast.parse(textwrap.dedent("\n".join(src)))
func_body = tree.body[0]
func_body.decorator_list = []
global_vars = _get_global_vars(self.func)
for i, arg in enumerate(func_body.args.args):
anno = arg.annotation
if isinstance(anno, ast.Name):
global_vars[anno.id] = self.argument_annotations[i]
if isinstance(func_body.returns, ast.Name):
global_vars[func_body.returns.id] = self.return_type
if is_kernel or impl.get_runtime().experimental_real_function:
for i in self.template_slot_locations:
template_var_name = self.argument_names[i]
global_vars[template_var_name] = args[i]
return tree, ASTTransformerContext(excluded_parameters=excluded_parameters,
is_kernel=is_kernel,
func=self,
arg_features=arg_features,
global_vars=global_vars,
argument_data=args,
src=src,
start_lineno=start_lineno,
file=file,
ast_builder=ast_builder)
class Func:
function_counter = 0
def __init__(self, _func, _classfunc=False, _pyfunc=False):
self.func = _func
self.func_id = Func.function_counter
Func.function_counter += 1
self.compiled = None
self.classfunc = _classfunc
self.pyfunc = _pyfunc
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
self.taichi_functions = {}
def __call__(self, *args):
if not impl.inside_kernel():
if not self.pyfunc:
raise TaichiSyntaxError(
"Taichi functions cannot be called from Python-scope."
" Use @ti.pyfunc if you wish to call Taichi functions "
"from both Python-scope and Taichi-scope.")
return self.func(*args)
if impl.get_runtime().experimental_real_function:
if impl.get_runtime().current_kernel.is_grad:
raise TaichiSyntaxError(
"Real function in gradient kernels unsupported.")
instance_id, _ = self.mapper.lookup(args)
key = _ti_core.FunctionKey(self.func.__name__, self.func_id,
instance_id)
if self.compiled is None:
self.compiled = {}
if key.instance_id not in self.compiled:
self.do_compile(key=key, args=args)
return self.func_call_rvalue(key=key, args=args)
tree, ctx = _get_tree_and_ctx(
self,
is_kernel=False,
args=args,
ast_builder=impl.get_runtime().prog.current_ast_builder())
ret = transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Function has a return type but does not have a return statement"
)
return ret
def func_call_rvalue(self, key, args):
assert impl.get_runtime().experimental_real_function
non_template_args = []
for i, anno in enumerate(self.argument_annotations):
if not isinstance(anno, template):
non_template_args.append(args[i])
non_template_args = impl.make_expr_group(non_template_args)
return Expr(
_ti_core.make_func_call_expr(
self.taichi_functions[key.instance_id], non_template_args))
def do_compile(self, key, args):
tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args)
fn = impl.get_runtime().prog.create_function(key)
def func_body():
ctx.ast_builder = fn.ast_builder()
transform_tree(tree, ctx)
self.taichi_functions[key.instance_id] = fn
self.compiled[key.instance_id] = func_body
self.taichi_functions[key.instance_id].set_function_body(func_body)
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise TaichiSyntaxError(
'Taichi functions do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise TaichiSyntaxError(
'Taichi functions do not support variable positional parameters (i.e., *args)'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise TaichiSyntaxError(
'Taichi functions do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise TaichiSyntaxError(
'Taichi functions only support "positional or keyword" parameters'
)
annotation = param.annotation
if annotation is inspect.Parameter.empty:
if i == 0 and self.classfunc:
annotation = template()
elif not self.pyfunc and impl.get_runtime(
).experimental_real_function:
raise TaichiSyntaxError(
f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated'
)
else:
if not id(annotation
) in primitive_types.type_ids and not isinstance(
annotation, template):
raise TaichiSyntaxError(
f'Invalid type annotation (argument {i}) of Taichi function: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
class TaichiCallableTemplateMapper:
def __init__(self, annotations, template_slot_locations):
self.annotations = annotations
self.num_args = len(annotations)
self.template_slot_locations = template_slot_locations
self.mapping = {}
@staticmethod
def extract_arg(arg, anno):
if isinstance(anno, template):
if isinstance(arg, taichi.lang.snode.SNode):
return arg.ptr
if isinstance(arg, taichi.lang.expr.Expr):
return arg.ptr.get_underlying_ptr_address()
if isinstance(arg, _ti_core.Expr):
return arg.get_underlying_ptr_address()
if isinstance(arg, tuple):
return tuple(
TaichiCallableTemplateMapper.extract_arg(item, anno)
for item in arg)
return arg
if isinstance(anno, any_arr):
if isinstance(arg, taichi.lang._ndarray.ScalarNdarray):
anno.check_element_dim(arg, 0)
anno.check_element_shape(())
anno.check_field_dim(len(arg.shape))
return arg.dtype, len(arg.shape), (), Layout.AOS
if isinstance(arg, taichi.lang.matrix.VectorNdarray):
anno.check_element_dim(arg, 1)
anno.check_element_shape((arg.n, ))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout
if isinstance(arg, taichi.lang.matrix.MatrixNdarray):
anno.check_element_dim(arg, 2)
anno.check_element_shape((arg.n, arg.m))
anno.check_field_dim(len(arg.shape))
anno.check_layout(arg)
return arg.dtype, len(arg.shape) + 2, (arg.n,
arg.m), arg.layout
# external arrays
element_dim = 0 if anno.element_dim is None else anno.element_dim
layout = Layout.AOS if anno.layout is None else anno.layout
shape = tuple(arg.shape)
if len(shape) < element_dim:
raise ValueError(
f"Invalid argument into ti.any_arr() - required element_dim={element_dim}, "
f"but the argument has only {len(shape)} dimensions")
element_shape = (
) if element_dim == 0 else shape[:
element_dim] if layout == Layout.SOA else shape[
-element_dim:]
return to_taichi_type(arg.dtype), len(shape), element_shape, layout
# Use '
return '
def extract(self, args):
extracted = []
for arg, anno in zip(args, self.annotations):
extracted.append(self.extract_arg(arg, anno))
return tuple(extracted)
def lookup(self, args):
if len(args) != self.num_args:
raise TypeError(
f'{self.num_args} argument(s) needed but {len(args)} provided.'
)
key = self.extract(args)
if key not in self.mapping:
count = len(self.mapping)
self.mapping[key] = count
return self.mapping[key], key
def _get_global_vars(_func):
# Discussions: https://github.com/taichi-dev/taichi/issues/282
global_vars = _func.__globals__.copy()
freevar_names = _func.__code__.co_freevars
closure = _func.__closure__
if closure:
freevar_values = list(map(lambda x: x.cell_contents, closure))
for name, value in zip(freevar_names, freevar_values):
global_vars[name] = value
return global_vars
class Kernel:
counter = 0
def __init__(self, _func, is_grad, _classkernel=False):
self.func = _func
self.kernel_counter = Kernel.counter
Kernel.counter += 1
self.is_grad = is_grad
self.grad = None
self.argument_annotations = []
self.argument_names = []
self.return_type = None
self.classkernel = _classkernel
self.extract_arguments()
self.template_slot_locations = []
for i, anno in enumerate(self.argument_annotations):
if isinstance(anno, template):
self.template_slot_locations.append(i)
self.mapper = TaichiCallableTemplateMapper(
self.argument_annotations, self.template_slot_locations)
impl.get_runtime().kernels.append(self)
self.reset()
self.kernel_cpp = None
def reset(self):
self.runtime = impl.get_runtime()
if self.is_grad:
self.compiled_functions = self.runtime.compiled_grad_functions
else:
self.compiled_functions = self.runtime.compiled_functions
def extract_arguments(self):
sig = inspect.signature(self.func)
if sig.return_annotation not in (inspect._empty, None):
self.return_type = sig.return_annotation
params = sig.parameters
arg_names = params.keys()
for i, arg_name in enumerate(arg_names):
param = params[arg_name]
if param.kind == inspect.Parameter.VAR_KEYWORD:
raise TaichiSyntaxError(
'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)'
)
if param.kind == inspect.Parameter.VAR_POSITIONAL:
raise TaichiSyntaxError(
'Taichi kernels do not support variable positional parameters (i.e., *args)'
)
if param.default is not inspect.Parameter.empty:
raise TaichiSyntaxError(
'Taichi kernels do not support default values for arguments'
)
if param.kind == inspect.Parameter.KEYWORD_ONLY:
raise TaichiSyntaxError(
'Taichi kernels do not support keyword parameters')
if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD:
raise TaichiSyntaxError(
'Taichi kernels only support "positional or keyword" parameters'
)
annotation = param.annotation
if param.annotation is inspect.Parameter.empty:
if i == 0 and self.classkernel: # The |self| parameter
annotation = template()
else:
raise TaichiSyntaxError(
'Taichi kernels parameters must be type annotated')
else:
if isinstance(annotation, (template, any_arr)):
pass
elif id(annotation) in primitive_types.type_ids:
pass
elif isinstance(annotation, sparse_matrix_builder):
pass
elif isinstance(annotation, MatrixType):
pass
else:
raise TaichiSyntaxError(
f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}'
)
self.argument_annotations.append(annotation)
self.argument_names.append(param.name)
def materialize(self, key=None, args=None, arg_features=None):
if key is None:
key = (self.func, 0)
self.runtime.materialize()
if key in self.compiled_functions:
return
grad_suffix = ""
if self.is_grad:
grad_suffix = "_grad"
kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}"
_logging.trace(f"Compiling kernel {kernel_name}...")
tree, ctx = _get_tree_and_ctx(
self,
args=args,
excluded_parameters=self.template_slot_locations,
arg_features=arg_features)
if self.is_grad:
KernelSimplicityASTChecker(self.func).visit(tree)
# Do not change the name of 'taichi_ast_generator'
# The warning system needs this identifier to remove unnecessary messages
def taichi_ast_generator(kernel_cxx):
if self.runtime.inside_kernel:
raise TaichiSyntaxError(
"Kernels cannot call other kernels. I.e., nested kernels are not allowed. "
"Please check if you have direct/indirect invocation of kernels within kernels. "
"Note that some methods provided by the Taichi standard library may invoke kernels, "
"and please move their invocations to Python-scope.")
self.runtime.inside_kernel = True
self.runtime.current_kernel = self
try:
ctx.ast_builder = kernel_cxx.ast_builder()
transform_tree(tree, ctx)
if not impl.get_runtime().experimental_real_function:
if self.return_type and not ctx.returned:
raise TaichiSyntaxError(
"Kernel has a return type but does not have a return statement"
)
finally:
self.runtime.inside_kernel = False
self.runtime.current_kernel = None
taichi_kernel = impl.get_runtime().prog.create_kernel(
taichi_ast_generator, kernel_name, self.is_grad)
self.kernel_cpp = taichi_kernel
assert key not in self.compiled_functions
self.compiled_functions[key] = self.get_function_body(taichi_kernel)
def get_torch_callbacks(self, v, has_torch, is_ndarray=True):
callbacks = []
def get_call_back(u, v):
def call_back():
u.copy_(v)
return call_back
assert has_torch
assert isinstance(v, torch.Tensor)
if v._is_view():
raise ValueError(
"Torch view tensors are not supported, please call tensor.clone() before passing it into taichi kernel."
)
tmp = v
taichi_arch = self.runtime.prog.config.arch
# Ndarray means its memory is allocated on the specified taichi arch.
# Since torch only supports CPU & CUDA, torch-base ndarray only supports
# taichi cpu/cuda backend as well.
# Note I put x64/arm64/cuda here to be more specific.
assert not is_ndarray or taichi_arch in (
_ti_core.Arch.cuda, _ti_core.Arch.x64, _ti_core.Arch.arm64
), "Torch-based ndarray is only supported on taichi x64/arm64/cuda backend."
if str(v.device).startswith('cuda'):
# External tensor on cuda
if taichi_arch != _ti_core.Arch.cuda:
# copy data back to cpu
host_v = v.to(device='cpu', copy=True)
tmp = host_v
callbacks.append(get_call_back(v, host_v))
else:
# External tensor on cpu
if taichi_arch == _ti_core.Arch.cuda:
gpu_v = v.cuda()
tmp = gpu_v
callbacks.append(get_call_back(v, gpu_v))
return tmp, callbacks
def get_function_body(self, t_kernel):
# The actual function body
def func__(*args):
assert len(args) == len(
self.argument_annotations
), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'
tmps = []
callbacks = []
has_external_arrays = False
has_torch = has_pytorch()
ndarray_use_torch = impl.get_runtime().ndarray_use_torch
actual_argument_slot = 0
launch_ctx = t_kernel.make_launch_context()
for i, v in enumerate(args):
needed = self.argument_annotations[i]
if isinstance(needed, template):
continue
provided = type(v)
# Note: do not use sth like "needed == f32". That would be slow.
if id(needed) in primitive_types.real_type_ids:
if not isinstance(v, (float, int)):
raise TaichiRuntimeTypeError(i, needed.to_string(),
provided)
launch_ctx.set_arg_float(actual_argument_slot, float(v))
elif id(needed) in primitive_types.integer_type_ids:
if not isinstance(v, int):
raise TaichiRuntimeTypeError(i, needed.to_string(),
provided)
launch_ctx.set_arg_int(actual_argument_slot, int(v))
elif isinstance(needed, sparse_matrix_builder):
# Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument
launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
elif isinstance(needed, any_arr) and isinstance(
v, taichi.lang._ndarray.Ndarray):
has_external_arrays = True
v = v.arr
if ndarray_use_torch:
is_ndarray = True
tmp, torch_callbacks = self.get_torch_callbacks(
v, has_torch, is_ndarray)
callbacks += torch_callbacks
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), v.shape)
else:
launch_ctx.set_arg_ndarray(actual_argument_slot, v)
elif isinstance(needed, any_arr) and (self.match_ext_arr(v)):
has_external_arrays = True
is_numpy = isinstance(v, np.ndarray)
if is_numpy:
tmp = np.ascontiguousarray(v)
# Purpose: DO NOT GC |tmp|!
tmps.append(tmp)
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.ctypes.data),
tmp.nbytes, v.shape)
else:
is_ndarray = False
tmp, torch_callbacks = self.get_torch_callbacks(
v, has_torch, is_ndarray)
callbacks += torch_callbacks
launch_ctx.set_arg_external_array_with_shape(
actual_argument_slot, int(tmp.data_ptr()),
tmp.element_size() * tmp.nelement(), v.shape)
elif isinstance(needed, MatrixType):
if id(needed.dtype) in primitive_types.real_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], (int, float)):
raise TaichiRuntimeTypeError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_float(
actual_argument_slot, float(v[a, b]))
actual_argument_slot += 1
elif id(needed.dtype) in primitive_types.integer_type_ids:
for a in range(needed.n):
for b in range(needed.m):
if not isinstance(v[a, b], int):
raise TaichiRuntimeTypeError(
i, needed.dtype.to_string(),
type(v[a, b]))
launch_ctx.set_arg_int(actual_argument_slot,
int(v[a, b]))
actual_argument_slot += 1
else:
raise ValueError(
f'Matrix dtype {needed.dtype} is not integer type or real type.'
)
continue
else:
raise ValueError(
f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
)
actual_argument_slot += 1
# Both the class kernels and the plain-function kernels are unified now.
# In both cases, |self.grad| is another Kernel instance that computes the
# gradient. For class kernels, args[0] is always the kernel owner.
if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
self.runtime.target_tape.insert(self, args)
t_kernel(launch_ctx)
ret = None
ret_dt = self.return_type
has_ret = ret_dt is not None
if has_ret or (impl.current_cfg().async_mode
and has_external_arrays):
runtime_ops.sync()
if has_ret:
if id(ret_dt) in primitive_types.integer_type_ids:
ret = t_kernel.get_ret_int(0)
else:
ret = t_kernel.get_ret_float(0)
if callbacks:
for c in callbacks:
c()
return ret
return func__
@staticmethod
def match_ext_arr(v):
has_array = isinstance(v, np.ndarray)
if not has_array and has_pytorch():
has_array = isinstance(v, torch.Tensor)
return has_array
def ensure_compiled(self, *args):
instance_id, arg_features = self.mapper.lookup(args)
key = (self.func, instance_id)
self.materialize(key=key, args=args, arg_features=arg_features)
return key
# For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__
# Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU)
@_shell_pop_print
def __call__(self, *args, **kwargs):
if self.is_grad and impl.current_cfg().opt_level == 0:
_logging.warn(
"""opt_level = 1 is enforced to enable gradient computation."""
)
impl.current_cfg().opt_level = 1
assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels'
key = self.ensure_compiled(*args)
return self.compiled_functions[key](*args)
# For a Taichi class definition like below:
#
# @ti.data_oriented
# class X:
# @ti.kernel
# def foo(self):
# ...
#
# When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is
_KERNEL_CLASS_STACKFRAME_STMT_RES = [
re.compile(r'@(\w+\.)?data_oriented'),
re.compile(r'class '),
]
def _inside_class(level_of_class_stackframe):
try:
maybe_class_frame = sys._getframe(level_of_class_stackframe)
statement_list = inspect.getframeinfo(maybe_class_frame)[3]
first_statment = statement_list[0].strip()
for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES:
if pat.match(first_statment):
return True
except:
pass
return False
def _kernel_impl(_func, level_of_class_stackframe, verbose=False):
is_classkernel = _inside_class(level_of_class_stackframe + 1)
if verbose:
print(f'kernel={_func.__name__} is_classkernel={is_classkernel}')
primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel)
adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel)
primal.grad = adjoint
if is_classkernel:
@functools.wraps(_func)
def wrapped(*args, **kwargs):
clsobj = type(args[0])
assert not hasattr(clsobj, '_data_oriented')
raise TaichiSyntaxError(
f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
)
else:
@functools.wraps(_func)
def wrapped(*args, **kwargs):
try:
return primal(*args, **kwargs)
except TaichiCompilationError as e:
raise type(e)('\n' + str(e)) from None
wrapped.grad = adjoint
wrapped._is_wrapped_kernel = True
wrapped._is_classkernel = is_classkernel
wrapped._primal = primal
wrapped._adjoint = adjoint
return wrapped
def kernel(fn):
    """Decorator that turns a Python function into a Taichi kernel.

    The stackframe level 3 accounts for the call depth from the user's
    decoration site down to _inside_class — presumably tuned for this call
    chain; verify if the wrapping changes.
    """
    return _kernel_impl(fn, level_of_class_stackframe=3)
class _BoundedDifferentiableMethod:
    """Binds a wrapped class kernel to its owning @data_oriented instance.

    Instances behave like the bound method: calling runs the primal kernel,
    ``grad`` runs the adjoint.  The owning class must have been decorated
    with @ti.data_oriented (checked at construction).
    """

    def __init__(self, kernel_owner, wrapped_kernel_func):
        clsobj = type(kernel_owner)
        if not getattr(clsobj, '_data_oriented', False):
            raise TaichiSyntaxError(
                f'Please decorate class {clsobj.__name__} with @ti.data_oriented'
            )
        self._kernel_owner = kernel_owner
        self._primal = wrapped_kernel_func._primal
        self._adjoint = wrapped_kernel_func._adjoint
        self._is_staticmethod = wrapped_kernel_func._is_staticmethod
        # Filled in by data_oriented._getattr after construction.
        self.__name__ = None

    def __call__(self, *args, **kwargs):
        # Staticmethod kernels take no owner argument.
        if self._is_staticmethod:
            return self._primal(*args, **kwargs)
        return self._primal(self._kernel_owner, *args, **kwargs)

    def grad(self, *args, **kwargs):
        # Bug fix: mirror __call__ — the adjoint of a staticmethod kernel
        # must not be passed the owning instance either.
        if self._is_staticmethod:
            return self._adjoint(*args, **kwargs)
        return self._adjoint(self._kernel_owner, *args, **kwargs)
def data_oriented(cls):
    """Class decorator marking ``cls`` as a Taichi data-oriented class.

    Installs a custom ``__getattribute__`` that intercepts attribute access:
    wrapped class kernels are re-bound to the instance via
    _BoundedDifferentiableMethod, with property and staticmethod descriptors
    handled explicitly.
    """
    def _getattr(self, item):
        # Look the name up on the class dict first so the raw descriptor
        # (property / staticmethod) is visible before binding.  ``method``
        # is None for instance attributes and inherited names.
        method = cls.__dict__.get(item, None)
        is_property = method.__class__ == property
        is_staticmethod = method.__class__ == staticmethod
        if is_property:
            # Fetch the property's getter itself, not its computed value.
            x = method.fget
        else:
            # Normal lookup through the MRO (avoids re-entering _getattr).
            x = super(cls, self).__getattribute__(item)
        if hasattr(x, '_is_wrapped_kernel'):
            # Unwrap bound methods down to the underlying function so the
            # flags below stick to the single shared function object.
            if inspect.ismethod(x):
                wrapped = x.__func__
            else:
                wrapped = x
            wrapped._is_staticmethod = is_staticmethod
            assert inspect.isfunction(wrapped)
            if wrapped._is_classkernel:
                ret = _BoundedDifferentiableMethod(self, wrapped)
                ret.__name__ = wrapped.__name__
                if is_property:
                    # Property-wrapped kernels are invoked immediately.
                    return ret()
                return ret
        if is_property:
            return x(self)
        return x
    cls.__getattribute__ = _getattr
    # Marker checked by _BoundedDifferentiableMethod and _kernel_impl.
    cls._data_oriented = True
    return cls
__all__ = ["data_oriented", "func", "kernel"]
| true | true |
f7337c2653fedb575c0e39cee804d8a17992b3b1 | 86 | py | Python | marquee/signals.py | garyjohnson/marquee | ed0379d50b10827179ec22937bdf1ec659651c89 | [
"MIT"
] | null | null | null | marquee/signals.py | garyjohnson/marquee | ed0379d50b10827179ec22937bdf1ec659651c89 | [
"MIT"
] | null | null | null | marquee/signals.py | garyjohnson/marquee | ed0379d50b10827179ec22937bdf1ec659651c89 | [
"MIT"
] | null | null | null | SHOW_MARQUEE = "SHOW_MARQUEE"
SHOW_WINDOW = "SHOW_WINDOW"
HIDE_WINDOW = "HIDE_WINDOW"
| 21.5 | 29 | 0.790698 | SHOW_MARQUEE = "SHOW_MARQUEE"
SHOW_WINDOW = "SHOW_WINDOW"
HIDE_WINDOW = "HIDE_WINDOW"
| true | true |
f7337c398a7271a3f1ad168aba4bf8992569a715 | 990 | py | Python | Estrutura While - Eric e Rafaela/Q04 - Eric e Rafaela.py | RafaelaBF/Exercicios_Python_Grupo | 03b983ab8b481fb7cdaf1bc9b84bb1c399abf538 | [
"MIT"
] | 2 | 2021-11-09T12:57:23.000Z | 2021-11-09T12:57:31.000Z | Estrutura While - Eric e Rafaela/Q04 - Eric e Rafaela.py | Ericcastell/Exercicios_Python_Grupo | 1581610bfa8905bc7e157fc8beb6c0efe103889e | [
"MIT"
] | null | null | null | Estrutura While - Eric e Rafaela/Q04 - Eric e Rafaela.py | Ericcastell/Exercicios_Python_Grupo | 1581610bfa8905bc7e157fc8beb6c0efe103889e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Rafaela e Eric
"""
'''
Questão 4:
Faça um programa que leia um número inteiro positivo
e em seguida monte a figura abaixo. (Não utilize vetor)
Exemplo:
Se o número digitado for n=0. Deverá aparecer na tela:
*
Se o número digitado for n=1. Deverá aparecer na tela:
*
*
Se o número digitado for n=2. Deverá aparecer na tela:
**
*
*
Se o número digitado for n=3. Deverá aparecer na tela:
******
**
*
*
Se o número digitado for n=4. Deverá aparecer na tela:
************************************************************************************************************************
******
**
*
*
'''
# Read the number of rows for the star figure (see the header docstring).
n = int(input("Entre com um número: "))
# Advance a Fibonacci pair (n1, n2) n+1 steps.
n1 = 1
n2 = 1
i = 0
while (i < n+1):
    n3 = n1+n2
    n1 = n2
    n2 = n3
    i += 1
# maior starts at the Fibonacci term before n2; auxmaior at the one before
# that.  The loop below walks this pair backwards down the sequence.
maior = n2-n1
auxmaior = n1-maior
i=0
while (i < n+1):
    # fat = maior! (factorial), computed by repeated multiplication.
    x = maior
    fat = 1
    while (x > 1):
        fat*=x
        x-=1
    # Each row prints factorial(maior) stars, shrinking toward the apex.
    print("*" * fat)
    # Step the (maior, auxmaior) pair one Fibonacci term backwards.
    y = maior
    maior = auxmaior
    auxmaior = y-auxmaior
i+=1 | 18.679245 | 120 | 0.505051 |
n = int(input("Entre com um número: "))
n1 = 1
n2 = 1
i = 0
while (i < n+1):
n3 = n1+n2
n1 = n2
n2 = n3
i += 1
maior = n2-n1
auxmaior = n1-maior
i=0
while (i < n+1):
x = maior
fat = 1
while (x > 1):
fat*=x
x-=1
print("*" * fat)
y = maior
maior = auxmaior
auxmaior = y-auxmaior
i+=1 | true | true |
f7337d4ccc29cc699a347717a8b85238dc37a3e8 | 47 | py | Python | zlzzlzz2l/0208/2741.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | 3 | 2022-01-24T03:06:32.000Z | 2022-01-30T08:43:58.000Z | zlzzlzz2l/0208/2741.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | null | null | null | zlzzlzz2l/0208/2741.py | Kwak-JunYoung/154Algoritm-5weeks | fa18ae5f68a1ee722a30a05309214247f7fbfda4 | [
"MIT"
] | 2 | 2022-01-24T02:27:40.000Z | 2022-01-30T08:57:03.000Z | for i in range(1, int(input())+1):
print(i) | 23.5 | 34 | 0.574468 | for i in range(1, int(input())+1):
print(i) | true | true |
f7337f3e34d5b0e084b19c191435b6059b7623b7 | 275 | py | Python | class.py | maverick1599/CodeShot | a0c895d85b9b91931e5a252362e6f5c458328ae5 | [
"MIT"
] | 1 | 2020-11-15T14:58:53.000Z | 2020-11-15T14:58:53.000Z | class.py | hDmtP/CodeShot | 55ed95598fd1983436ce2032476010427928c5fc | [
"MIT"
] | 1 | 2019-10-14T02:47:49.000Z | 2019-10-14T02:47:49.000Z | class.py | hDmtP/CodeShot | 55ed95598fd1983436ce2032476010427928c5fc | [
"MIT"
] | 4 | 2019-10-06T05:51:18.000Z | 2021-10-17T08:44:41.000Z | class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "({0},{1})".format(self.x, self.y)
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Point(x, y)
| 21.153846 | 49 | 0.490909 | class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "({0},{1})".format(self.x, self.y)
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Point(x, y)
| true | true |
f7337f6ada4e726fd16d405b68ab684379f9d5ab | 1,878 | py | Python | src/main/python/systemds/operator/algorithm/builtin/lasso.py | dkerschbaumer/systemds | dc3a9f489951d7e13ec47c5181d2c5d7022665ce | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/operator/algorithm/builtin/lasso.py | dkerschbaumer/systemds | dc3a9f489951d7e13ec47c5181d2c5d7022665ce | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/operator/algorithm/builtin/lasso.py | dkerschbaumer/systemds | dc3a9f489951d7e13ec47c5181d2c5d7022665ce | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/lasso.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def lasso(X: OperationNode, y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
    """
    :param X: input feature matrix
    :param y: matrix Y columns of the design matrix
    :param tol: target convergence tolerance
    :param M: history length
    :param tau: regularization component
    :param maxi: maximum number of iterations until convergence
    :return: 'OperationNode' containing
    """
    # Both operands must be matrices before the DAG node can be emitted.
    X._check_matrix_op()
    y._check_matrix_op()
    named_inputs = {'X': X, 'y': y, **kwargs}
    return OperationNode(X.sds_context, 'lasso', named_input_nodes=named_inputs, output_type=OutputType.MATRIX)
| 38.326531 | 110 | 0.702343 |
from typing import Dict, Iterable
from systemds.operator import OperationNode
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def lasso(X: OperationNode, y: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
X._check_matrix_op()
y._check_matrix_op()
params_dict = {'X':X, 'y':y}
params_dict.update(kwargs)
return OperationNode(X.sds_context, 'lasso', named_input_nodes=params_dict, output_type=OutputType.MATRIX)
| true | true |
f7338010ef40975077e4884f3ec3a8f8e841322c | 10,916 | py | Python | hypothesis/openannotation/elem_match.py | FrankensteinVariorum/fv-data | 2162770b06692da3b715c89f52eed5123c958979 | [
"Unlicense"
] | 2 | 2018-10-19T21:28:42.000Z | 2018-10-22T06:07:17.000Z | hypothesis/openannotation/elem_match.py | PghFrankenstein/fv-data | 2162770b06692da3b715c89f52eed5123c958979 | [
"Unlicense"
] | 19 | 2018-10-17T22:07:28.000Z | 2019-07-06T20:29:53.000Z | hypothesis/openannotation/elem_match.py | FrankensteinVariorum/fv-data | 2162770b06692da3b715c89f52eed5123c958979 | [
"Unlicense"
] | null | null | null | """
Map p elements from hypothesis annotation pointers to XML ids in the collation chunk XML
"""
import json
import re
import warnings
from glob import glob
from os import path
from lxml import etree, html
from itertools import groupby
class Annotation:
    """One Hypothes.is annotation record, parsed from a JSON line.

    Exposes the range/quote selectors of the primary target and decodes
    1-based element indices from XPath-like containers such as ``/p[12]``.
    """

    def __init__(self, js):
        self.data = json.loads(js)
        # Witness identifier ("1818", "1823", "1831", "Thom") taken from the
        # annotated page URL, e.g. ".../Frankenstein_1818.html".
        self.witness = re.match(
            r".+Frankenstein_(.+)\.html", self.data["target"][0]["source"]
        ).groups()[0]

    def get_selector(self, selector_type):
        """Return the first selector of ``selector_type`` on the primary target."""
        return [
            s for s in self.data["target"][0]["selector"] if s["type"] == selector_type
        ][0]

    def container_selector(self):
        return self.get_selector("RangeSelector")

    def text_selector(self):
        return self.get_selector("TextQuoteSelector")

    def start_c(self):
        return self.container_selector()["startContainer"]

    def end_c(self):
        return self.container_selector()["endContainer"]

    def p_index(self):
        return self.start_p_index()

    def start_p_index(self):
        return self.pull_index("p", "start")

    def end_p_index(self):
        return self.pull_index("p", "end")

    def head_index(self):
        return self.start_head_index()

    def start_head_index(self):
        return self.pull_index("h3", "start")

    def end_head_index(self):
        return self.pull_index("h3", "end")

    def pull_index(self, element, position):
        """Return the index inside e.g. ``/p[12]`` for the start/end container,
        or None when the container does not reference ``element``.

        Rewritten as an explicit match-or-None instead of the original bare
        ``except`` (which swallowed even KeyboardInterrupt), and using a raw
        f-string so the regex escapes are not deprecated string escapes.
        """
        if position == "start":
            container = self.start_c()
        else:
            container = self.end_c()
        match = re.match(rf"/{element}\[(\d+)\]", container)
        return int(match.group(1)) if match else None
class Hypothesis:
    """In-memory collection of Hypothes.is annotations loaded from a
    JSON-lines file (one annotation per line)."""

    def __init__(self, path):
        # Fix: the original iterated over an unclosed ``open(...)`` — use a
        # context manager so the handle is released deterministically.
        with open(path, "r") as handle:
            self.annotations = [Annotation(line) for line in handle]

    def p_sort(self, witness_id):
        """Annotations of ``witness_id`` anchored on <p> elements, sorted by index."""
        return sorted(
            [
                a
                for a in self.annotations
                if a.p_index() is not None and a.witness == witness_id
            ],
            key=lambda x: x.p_index(),
        )

    def head_sort(self, witness_id):
        """Annotations of ``witness_id`` anchored on headings, sorted by index."""
        return sorted(
            [
                a
                for a in self.annotations
                if a.head_index() is not None and a.witness == witness_id
            ],
            key=lambda x: x.head_index(),
        )
class Collation:
    """Read-only wrapper around one witness's TEI collation XML, providing
    lookups from <p>/<head> positions to their @xml:id values."""

    def __init__(self, xml_path, witness):
        # TEI namespace used by every XPath query below.
        self.ns = {"n": "http://www.tei-c.org/ns/1.0"}
        self.tree = etree.parse(xml_path)
        self.witness = witness

    @property
    def uri(self):
        """Public URL of the rendered witness this collation represents."""
        return (
            f"https://frankensteinvariorum.github.io/fv-collation/{self.witness}.html"
        )

    def p_only(self):
        """All TEI <p> elements, in document order."""
        return self.tree.xpath("//n:p", namespaces=self.ns)

    def head_only(self):
        """All TEI <head> elements, in document order."""
        return self.tree.xpath("//n:head", namespaces=self.ns)

    def p_id(self, index):
        """xml:id of the index-th <p>, or None if out of range / missing."""
        try:
            return self.p_only()[index].xpath("./@xml:id", namespaces=self.ns)[0]
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; any lookup failure still yields None.
            return None

    def head_id(self, index):
        """xml:id of the index-th <head>, or None if out of range / missing."""
        try:
            return self.head_only()[index].xpath("./@xml:id", namespaces=self.ns)[0]
        except Exception:
            return None

    def id_exists(self, xmlid):
        """True iff exactly one element carries ``xmlid``; raise on duplicates."""
        res = self.tree.xpath(f"//*[@xml:id='{xmlid}']", namespaces=self.ns)
        if len(res) > 1:
            raise Exception(f"Id {xmlid} returned {len(res)} matches")
        return len(res) == 1

    def diagnostic(self, xml_id):
        """Serialized element for ``xml_id`` (debug aid); falls back to the id."""
        try:
            return etree.tostring(
                self.tree.xpath(f"//*[@xml:id='{xml_id}']", namespaces=self.ns)[0]
            ).decode("utf-8")
        except Exception:
            return xml_id
class OpenAnnotation:
    """Converts Hypothesis annotations into W3C Web Annotation JSON targeting
    XML ids in a witness's collation file.

    ``p_offset``/``head_offset`` reconcile the element indices recorded by
    Hypothesis against the HTML with the element positions in the collation
    XML — presumably a fixed per-witness shift; confirm against the
    instantiations at module level.
    """
    def __init__(self, annotations, collation, p_offset=-1, head_offset=0):
        self.collation = collation
        self.annotations = annotations
        self.p_offset = p_offset
        self.head_offset = head_offset

    def oa_template(
        self,
        a,
        start_xml_id,
        end_xml_id,
        start_html_index,
        end_html_index,
        target_witness=None,
    ):
        """Build one Web Annotation dict for annotation ``a``.

        When ``target_witness`` is given, the annotation is "mirrored" onto
        that other witness (same XML ids) instead of ``self.collation``.
        ``start_html_index``/``end_html_index`` are accepted but unused here.
        """
        if target_witness is None:
            target_doc = self.collation
            mirrored = False
        else:
            target_doc = target_witness
            mirrored = True
        # One tagging body per Hypothesis tag, plus the comment text itself.
        body_content = [
            {"type": "TextualBody", "purpose": "tagging", "value": t}
            for t in a.data["tags"]
        ]
        body_content.append(
            {
                "type": "TextualBody",
                "value": a.data["text"],
                "creator": "https://hypothes.is/users/frankensteinvariorum",
                "modified": a.data["updated"],
                "purpose": "commenting",
            }
        )
        # Range selector anchored on the mapped XML ids.
        selectors = [
            {
                "type": "RangeSelector",
                "startSelector": {
                    "type": "XPathSelector",
                    "value": f"//*[@xml:id='{start_xml_id}']",
                },
                "endSelector": {
                    "type": "XPathSelector",
                    "value": f"//*[@xml:id='{end_xml_id}']",
                },
            }
        ]
        # if target_witness is None:
        # NOTE(review): the quote selector is appended even for mirrored
        # targets; the commented-out guard above suggests this was once
        # conditional — confirm the current behavior is intended.
        selectors.append(
            {
                "type": "TextQuoteSelector",
                "prefix": a.text_selector()["prefix"],
                "exact": a.text_selector()["exact"],
                "suffix": a.text_selector()["suffix"],
            }
        )
        return {
            "@context": "http://www.w3.org/ns/anno.jsonld",
            # NB the ID is unfinished at this stage, and will get its final incremental ID added in postprocessing
            "id": f"https://frankensteinvariorum.github.io/annotations/{target_doc.witness}/",
            "type": "Annotation",
            "generator": {
                "id": "https://frankensteinvariorum.github.io/viewer",
                "type": "Software",
                "name": "Frankenstein Variorum",
                "homepage": "https://github.com/FrankensteinVariorum/fv-data",
            },
            "generated": a.data["created"],
            "body": body_content,
            "target": {"source": target_doc.uri, "type": "Text", "selector": selectors},
            "mirrored": mirrored,
        }

    def generate_oa(self, variorum):
        """Produce the full list of Web Annotations for this witness.

        "change-ann"-tagged annotations are additionally mirrored onto every
        other witness where both boundary xml:ids exist.
        """
        oa = []
        # Match all the p elements
        for a in self.annotations.p_sort(self.collation.witness):
            start_xml_id = self.collation.p_id(a.start_p_index() + self.p_offset)
            end_xml_id = self.collation.p_id(a.end_p_index() + self.p_offset)
            oa.append(
                self.oa_template(a, start_xml_id, end_xml_id, a.start_c(), a.end_c())
            )
            if "change-ann" in a.data["tags"]:
                other_witnesses = variorum.get_other_witnesses(a.witness)
                for wit in other_witnesses:
                    if wit.id_exists(start_xml_id) and wit.id_exists(end_xml_id):
                        oa.append(
                            self.oa_template(
                                a,
                                start_xml_id,
                                end_xml_id,
                                a.start_c(),
                                a.end_c(),
                                target_witness=wit,
                            )
                        )
        # Match all the head elements
        for a in self.annotations.head_sort(self.collation.witness):
            start_xml_id = self.collation.head_id(
                a.start_head_index() + self.head_offset
            )
            end_xml_id = self.collation.head_id(a.end_head_index() + self.head_offset)
            oa.append(
                self.oa_template(a, start_xml_id, end_xml_id, a.start_c(), a.end_c())
            )
            if "change-ann" in a.data["tags"]:
                other_witnesses = variorum.get_other_witnesses(a.witness)
                for wit in other_witnesses:
                    if wit.id_exists(start_xml_id) and wit.id_exists(end_xml_id):
                        oa.append(
                            self.oa_template(
                                a,
                                start_xml_id,
                                end_xml_id,
                                a.start_c(),
                                a.end_c(),
                                target_witness=wit,
                            )
                        )
        return oa
class Variorum:
    """Container for the four Frankenstein witnesses, keyed by the string
    identifiers "1818", "1823", "1831" and "Thom"."""

    def __init__(self, w1818, w1823, w1831, wThomas):
        self.w1818 = w1818
        self.w1823 = w1823
        self.w1831 = w1831
        self.wThomas = wThomas

    def get_witness(self, s):
        """Return the witness identified by ``s``; raise on unknown ids."""
        if s == "1818":
            return self.w1818
        elif s == "1823":
            return self.w1823
        elif s == "1831":
            return self.w1831
        elif s == "Thom":
            # Consistency fix: get_other_witnesses accepts "Thom", so the
            # direct lookup must too.
            return self.wThomas
        else:
            raise Exception(f"'{s}' is not a valid witness identifier.")

    def get_other_witnesses(self, s):
        """Return the three witnesses other than ``s``; raise on unknown ids."""
        if s == "1818":
            return [self.w1823, self.w1831, self.wThomas]
        elif s == "1823":
            return [self.w1818, self.w1831, self.wThomas]
        elif s == "1831":
            return [self.w1818, self.w1823, self.wThomas]
        elif s == "Thom":
            return [self.w1818, self.w1823, self.w1831]
        else:
            raise Exception(f"'{s}' is not a valid witness identifier.")
# Load every Hypothesis annotation and the four witness collations.
his = Hypothesis("hypothesis/data/hypothesis.json")
c1818 = Collation(xml_path="hypothesis/migration/xml-ids/1818_full.xml", witness="1818")
c1823 = Collation(xml_path="hypothesis/migration/xml-ids/1823_full.xml", witness="1823")
c1831 = Collation(xml_path="hypothesis/migration/xml-ids/1831_full.xml", witness="1831")
cThomas = Collation(
    xml_path="hypothesis/migration/xml-ids/Thomas_full.xml", witness="Thom"
)
fv = Variorum(c1818, c1823, c1831, cThomas)
# Per-witness converters; offsets reconcile the Hypothesis HTML indices with
# the XML element positions.  Note 1823 has no converter of its own — it is
# only a mirror target (presumably intentional; confirm).
oa1818 = OpenAnnotation(annotations=his, collation=c1818, p_offset=1, head_offset=0)
oa1831 = OpenAnnotation(annotations=his, collation=c1831, p_offset=1, head_offset=-1)
oaThom = OpenAnnotation(annotations=his, collation=cThomas, p_offset=1)
oa1818anns = oa1818.generate_oa(variorum=fv)
oa1831anns = oa1831.generate_oa(variorum=fv)
oaThomanns = oaThom.generate_oa(variorum=fv)
# groupby needs its input pre-sorted by the grouping key (the target URI).
bulk_annotations = sorted(
    oa1818anns + oa1831anns + oaThomanns, key=lambda x: x["target"]["source"]
)
regrouped_annotations = groupby(bulk_annotations, lambda x: x["target"]["source"])
for group, grpr in regrouped_annotations:
    fn = re.match(r".+fv-collation/(.+)\.html", group).groups()[0]
    # Account for an eccentricity of annotation URLs during different points of migration
    if fn == "Thom":
        fn = "Thomas"
    annotations = []
    # Finish each annotation id with a 1-based incremental suffix.
    for i, g in enumerate(grpr):
        g["id"] = g["id"] + str(i + 1)
        annotations.append(g)
    # Fix: the original passed an unclosed ``open(...)`` straight into
    # json.dump, leaking the handle; a context manager flushes and closes it.
    with open(f"hypothesis/openannotation/{fn}_xml_id_mapping.json", "w") as out:
        json.dump(annotations, out, indent=True)
| 32.585075 | 114 | 0.5393 |
import json
import re
import warnings
from glob import glob
from os import path
from lxml import etree, html
from itertools import groupby
class Annotation:
def __init__(self, js):
self.data = json.loads(js)
self.witness = re.match(
r".+Frankenstein_(.+)\.html", self.data["target"][0]["source"]
).groups()[0]
def get_selector(self, selector_type):
return [
s for s in self.data["target"][0]["selector"] if s["type"] == selector_type
][0]
def container_selector(self):
return self.get_selector("RangeSelector")
def text_selector(self):
return self.get_selector("TextQuoteSelector")
def start_c(self):
return self.container_selector()["startContainer"]
def end_c(self):
return self.container_selector()["endContainer"]
def p_index(self):
return self.start_p_index()
def start_p_index(self):
return self.pull_index("p", "start")
def end_p_index(self):
return self.pull_index("p", "end")
def head_index(self):
return self.start_head_index()
def start_head_index(self):
return self.pull_index("h3", "start")
def end_head_index(self):
return self.pull_index("h3", "end")
def pull_index(self, element, position):
if position == "start":
container = self.start_c()
else:
container = self.end_c()
try:
return int(re.match(f"/{element}\[(\d+)\]", container).groups()[0])
except:
return None
class Hypothesis:
def __init__(self, path):
self.annotations = [Annotation(line) for line in open(path, "r")]
def p_sort(self, witness_id):
return sorted(
[
a
for a in self.annotations
if a.p_index() is not None and a.witness == witness_id
],
key=lambda x: x.p_index(),
)
def head_sort(self, witness_id):
return sorted(
[
a
for a in self.annotations
if a.head_index() is not None and a.witness == witness_id
],
key=lambda x: x.head_index(),
)
class Collation:
def __init__(self, xml_path, witness):
self.ns = {"n": "http://www.tei-c.org/ns/1.0"}
self.tree = etree.parse(xml_path)
self.witness = witness
@property
def uri(self):
return (
f"https://frankensteinvariorum.github.io/fv-collation/{self.witness}.html"
)
def p_only(self):
return self.tree.xpath("//n:p", namespaces=self.ns)
def head_only(self):
return self.tree.xpath("//n:head", namespaces=self.ns)
def p_id(self, index):
try:
return self.p_only()[index].xpath("./@xml:id", namespaces=self.ns)[0]
except:
return None
def head_id(self, index):
try:
return self.head_only()[index].xpath("./@xml:id", namespaces=self.ns)[0]
except:
return None
def id_exists(self, xmlid):
res = self.tree.xpath(f"//*[@xml:id='{xmlid}']", namespaces=self.ns)
if len(res) > 1:
raise Exception(f"Id {xmlid} returned {len(res)} matches")
return len(res) == 1
def diagnostic(self, xml_id):
try:
return etree.tostring(
self.tree.xpath(f"//*[@xml:id='{xml_id}']", namespaces=self.ns)[0]
).decode("utf-8")
except:
return xml_id
class OpenAnnotation:
def __init__(self, annotations, collation, p_offset=-1, head_offset=0):
self.collation = collation
self.annotations = annotations
self.p_offset = p_offset
self.head_offset = head_offset
def oa_template(
self,
a,
start_xml_id,
end_xml_id,
start_html_index,
end_html_index,
target_witness=None,
):
if target_witness is None:
target_doc = self.collation
mirrored = False
else:
target_doc = target_witness
mirrored = True
body_content = [
{"type": "TextualBody", "purpose": "tagging", "value": t}
for t in a.data["tags"]
]
body_content.append(
{
"type": "TextualBody",
"value": a.data["text"],
"creator": "https://hypothes.is/users/frankensteinvariorum",
"modified": a.data["updated"],
"purpose": "commenting",
}
)
selectors = [
{
"type": "RangeSelector",
"startSelector": {
"type": "XPathSelector",
"value": f"//*[@xml:id='{start_xml_id}']",
},
"endSelector": {
"type": "XPathSelector",
"value": f"//*[@xml:id='{end_xml_id}']",
},
}
]
selectors.append(
{
"type": "TextQuoteSelector",
"prefix": a.text_selector()["prefix"],
"exact": a.text_selector()["exact"],
"suffix": a.text_selector()["suffix"],
}
)
return {
"@context": "http://www.w3.org/ns/anno.jsonld",
"id": f"https://frankensteinvariorum.github.io/annotations/{target_doc.witness}/",
"type": "Annotation",
"generator": {
"id": "https://frankensteinvariorum.github.io/viewer",
"type": "Software",
"name": "Frankenstein Variorum",
"homepage": "https://github.com/FrankensteinVariorum/fv-data",
},
"generated": a.data["created"],
"body": body_content,
"target": {"source": target_doc.uri, "type": "Text", "selector": selectors},
"mirrored": mirrored,
}
def generate_oa(self, variorum):
oa = []
for a in self.annotations.p_sort(self.collation.witness):
start_xml_id = self.collation.p_id(a.start_p_index() + self.p_offset)
end_xml_id = self.collation.p_id(a.end_p_index() + self.p_offset)
oa.append(
self.oa_template(a, start_xml_id, end_xml_id, a.start_c(), a.end_c())
)
if "change-ann" in a.data["tags"]:
other_witnesses = variorum.get_other_witnesses(a.witness)
for wit in other_witnesses:
if wit.id_exists(start_xml_id) and wit.id_exists(end_xml_id):
oa.append(
self.oa_template(
a,
start_xml_id,
end_xml_id,
a.start_c(),
a.end_c(),
target_witness=wit,
)
)
for a in self.annotations.head_sort(self.collation.witness):
start_xml_id = self.collation.head_id(
a.start_head_index() + self.head_offset
)
end_xml_id = self.collation.head_id(a.end_head_index() + self.head_offset)
oa.append(
self.oa_template(a, start_xml_id, end_xml_id, a.start_c(), a.end_c())
)
if "change-ann" in a.data["tags"]:
other_witnesses = variorum.get_other_witnesses(a.witness)
for wit in other_witnesses:
if wit.id_exists(start_xml_id) and wit.id_exists(end_xml_id):
oa.append(
self.oa_template(
a,
start_xml_id,
end_xml_id,
a.start_c(),
a.end_c(),
target_witness=wit,
)
)
return oa
class Variorum:
def __init__(self, w1818, w1823, w1831, wThomas):
self.w1818 = w1818
self.w1823 = w1823
self.w1831 = w1831
self.wThomas = wThomas
def get_witness(self, s):
if s == "1818":
return self.w1818
elif s == "1823":
return self.w1823
elif s == "1831":
return self.w1831
else:
raise Exception(f"'{s}' is not a valid witness identifier.")
def get_other_witnesses(self, s):
if s == "1818":
return [self.w1823, self.w1831, self.wThomas]
elif s == "1823":
return [self.w1818, self.w1831, self.wThomas]
elif s == "1831":
return [self.w1818, self.w1823, self.wThomas]
elif s == "Thom":
return [self.w1818, self.w1823, self.w1831]
else:
raise Exception(f"'{s}' is not a valid witness identifier.")
his = Hypothesis("hypothesis/data/hypothesis.json")
c1818 = Collation(xml_path="hypothesis/migration/xml-ids/1818_full.xml", witness="1818")
c1823 = Collation(xml_path="hypothesis/migration/xml-ids/1823_full.xml", witness="1823")
c1831 = Collation(xml_path="hypothesis/migration/xml-ids/1831_full.xml", witness="1831")
cThomas = Collation(
xml_path="hypothesis/migration/xml-ids/Thomas_full.xml", witness="Thom"
)
fv = Variorum(c1818, c1823, c1831, cThomas)
oa1818 = OpenAnnotation(annotations=his, collation=c1818, p_offset=1, head_offset=0)
oa1831 = OpenAnnotation(annotations=his, collation=c1831, p_offset=1, head_offset=-1)
oaThom = OpenAnnotation(annotations=his, collation=cThomas, p_offset=1)
oa1818anns = oa1818.generate_oa(variorum=fv)
oa1831anns = oa1831.generate_oa(variorum=fv)
oaThomanns = oaThom.generate_oa(variorum=fv)
bulk_annotations = sorted(
oa1818anns + oa1831anns + oaThomanns, key=lambda x: x["target"]["source"]
)
regrouped_annotations = groupby(bulk_annotations, lambda x: x["target"]["source"])
for group, grpr in regrouped_annotations:
fn = re.match(r".+fv-collation/(.+)\.html", group).groups()[0]
if fn == "Thom":
fn = "Thomas"
annotations = []
for i, g in enumerate(grpr):
g["id"] = g["id"] + str(i + 1)
annotations.append(g)
json.dump(
annotations,
open(f"hypothesis/openannotation/{fn}_xml_id_mapping.json", "w"),
indent=True,
)
| true | true |
f73380a44ac62c7125491bfa10cbafd2c518d561 | 1,702 | py | Python | zerver/management/commands/delete_old_unclaimed_attachments.py | cozyrohan/zulip | 909b484d648cdabc8854dbf8f33e92dda4876968 | [
"Apache-2.0"
] | 2 | 2021-02-02T01:29:32.000Z | 2021-02-02T01:30:51.000Z | zerver/management/commands/delete_old_unclaimed_attachments.py | cozyrohan/zulip | 909b484d648cdabc8854dbf8f33e92dda4876968 | [
"Apache-2.0"
] | 1 | 2021-01-07T15:28:54.000Z | 2021-01-08T15:38:45.000Z | zerver/management/commands/delete_old_unclaimed_attachments.py | cozyrohan/zulip | 909b484d648cdabc8854dbf8f33e92dda4876968 | [
"Apache-2.0"
] | 1 | 2020-12-03T17:08:44.000Z | 2020-12-03T17:08:44.000Z | from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from zerver.lib.actions import do_delete_old_unclaimed_attachments
from zerver.models import get_old_unclaimed_attachments
class Command(BaseCommand):
    help = """Remove unclaimed attachments from storage older than a supplied
numerical value indicating the limit of how old the attachment can be.
One week is taken as the default value."""

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register the command-line flags for this management command."""
        parser.add_argument(
            '-w', '--weeks',
            dest='delta_weeks',
            type=int,
            default=5,
            help="Limiting value of how old the file can be.",
        )
        parser.add_argument(
            '-f', '--for-real',
            action='store_true',
            help="Actually remove the files from the storage.",
        )

    def handle(self, *args: Any, **options: Any) -> None:
        """List the old unclaimed attachments, then delete them unless this
        is a dry run (no -f flag)."""
        delta_weeks = options['delta_weeks']
        print(f"Deleting unclaimed attached files older than {delta_weeks} weeks")
        # Show exactly which files are about to be removed.
        for attachment in get_old_unclaimed_attachments(delta_weeks):
            print(f"* {attachment.file_name} created at {attachment.create_time}")
        print("")
        if not options["for_real"]:
            raise CommandError("This was a dry run. Pass -f to actually delete.")
        do_delete_old_unclaimed_attachments(delta_weeks)
        print("")
        print("Unclaimed files deleted.")
| 40.52381 | 90 | 0.634548 | from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from zerver.lib.actions import do_delete_old_unclaimed_attachments
from zerver.models import get_old_unclaimed_attachments
class Command(BaseCommand):
help = """Remove unclaimed attachments from storage older than a supplied
numerical value indicating the limit of how old the attachment can be.
One week is taken as the default value."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('-w', '--weeks',
dest='delta_weeks',
default=5,
type=int,
help="Limiting value of how old the file can be.")
parser.add_argument('-f', '--for-real',
action='store_true',
help="Actually remove the files from the storage.")
def handle(self, *args: Any, **options: Any) -> None:
delta_weeks = options['delta_weeks']
print(f"Deleting unclaimed attached files older than {delta_weeks} weeks")
old_attachments = get_old_unclaimed_attachments(delta_weeks)
for old_attachment in old_attachments:
print(f"* {old_attachment.file_name} created at {old_attachment.create_time}")
print("")
if not options["for_real"]:
raise CommandError("This was a dry run. Pass -f to actually delete.")
do_delete_old_unclaimed_attachments(delta_weeks)
print("")
print("Unclaimed files deleted.")
| true | true |
f73380b341c5fa85ff56206ba6036092064aa04f | 981 | py | Python | orchestra/contrib/bills/serializers.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 68 | 2015-02-09T10:28:44.000Z | 2022-03-12T11:08:36.000Z | orchestra/contrib/bills/serializers.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 17 | 2015-05-01T18:10:03.000Z | 2021-03-19T21:52:55.000Z | orchestra/contrib/bills/serializers.py | RubenPX/django-orchestra | 5ab4779e1ae12ec99569d682601b7810587ed381 | [
"Unlicense"
] | 29 | 2015-03-31T04:51:03.000Z | 2022-02-17T02:58:50.000Z | from rest_framework import serializers
from orchestra.api import router
from orchestra.contrib.accounts.models import Account
from orchestra.contrib.accounts.serializers import AccountSerializerMixin
from .models import Bill, BillLine, BillContact
class BillLineSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for individual bill lines.

    No explicit ``fields`` is declared, so DRF's default field selection
    applies — presumably intentional; confirm against the DRF version in use.
    """
    class Meta:
        model = BillLine
class BillSerializer(AccountSerializerMixin, serializers.HyperlinkedModelSerializer):
    """Hyperlinked serializer for bills, scoped to the account via the mixin."""
    # lines = BillLineSerializer(source='lines')
    # NOTE(review): nested line serialization above is disabled; the 'lines'
    # entry in ``fields`` below is commented out to match.

    class Meta:
        model = Bill
        fields = (
            'url', 'id', 'number', 'type', 'total', 'is_sent', 'created_on', 'due_on',
            'comments',
            # 'lines'
        )
class BillContactSerializer(AccountSerializerMixin, serializers.ModelSerializer):
    """Flat (non-hyperlinked) serializer for a bill's billing contact."""
    class Meta:
        model = BillContact
        fields = ('name', 'address', 'city', 'zipcode', 'country', 'vat')


# Expose the bill contact as a nested, optional resource on Account.
router.insert(Account, 'billcontact', BillContactSerializer, required=False)
| 28.028571 | 86 | 0.704383 | from rest_framework import serializers
from orchestra.api import router
from orchestra.contrib.accounts.models import Account
from orchestra.contrib.accounts.serializers import AccountSerializerMixin
from .models import Bill, BillLine, BillContact
class BillLineSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = BillLine
class BillSerializer(AccountSerializerMixin, serializers.HyperlinkedModelSerializer):
class Meta:
model = Bill
fields = (
'url', 'id', 'number', 'type', 'total', 'is_sent', 'created_on', 'due_on',
'comments',
)
class BillContactSerializer(AccountSerializerMixin, serializers.ModelSerializer):
class Meta:
model = BillContact
fields = ('name', 'address', 'city', 'zipcode', 'country', 'vat')
router.insert(Account, 'billcontact', BillContactSerializer, required=False)
| true | true |
f73380dc833787ad16db5e30fbd5be44452f83be | 1,894 | py | Python | Home/views.py | poppingpixel/DjangoWebsite.io | 5c8a1637333a9856bdb7785016a5d8c650eab76e | [
"Apache-2.0"
] | null | null | null | Home/views.py | poppingpixel/DjangoWebsite.io | 5c8a1637333a9856bdb7785016a5d8c650eab76e | [
"Apache-2.0"
] | null | null | null | Home/views.py | poppingpixel/DjangoWebsite.io | 5c8a1637333a9856bdb7785016a5d8c650eab76e | [
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render,redirect
from django.http import HttpResponse , HttpResponseRedirect
from django.contrib.auth.models import User , auth
from django.contrib.auth import authenticate , login , logout
from django.contrib import messages
def home(request):
    """Render the landing page."""
    return render(request,'home.html')
def handlesignup(request):
    """Create a new user account from the signup form.

    Validates that the username and email are unused and that the two
    password fields match; on success creates the user and redirects to
    the todo page.  Non-POST requests get a 404-style response.
    """
    if request.method == 'POST':
        Username = request.POST['Username']
        Email = request.POST['Email']
        Pass1 = request.POST['Pass1']
        Pass2 = request.POST['Pass2']
        if User.objects.filter(username=Username).exists():
            messages.error(request, "Username Taken")
            return redirect('home')
        elif User.objects.filter(email=Email).exists():
            messages.error(request, "Email taken")
            return redirect('home')
        elif Pass1 != Pass2:
            messages.error(request, "password doesnot match")
            # Bug fix: this branch originally fell through without returning,
            # making the view return None and Django raise a 500.
            return redirect('home')
        else:
            myuser = User.objects.create_user(Username, Email, Pass1)
            myuser.save()
            messages.success(request, "sucesss")
            return HttpResponseRedirect('todo/')
    else:
        return HttpResponse("404 -page forbiden")
def handlelogout(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    messages.success(request, "sucesssfully logged out")
    return redirect('home')
def handlelogin(request):
    """Authenticate a user from the login form.

    On success logs the user in and redirects to the todo page; on bad
    credentials flashes an error and returns to the landing page.
    Non-POST requests are rejected (bug fix: previously returned None,
    which makes Django raise "view didn't return an HttpResponse").
    """
    if request.method != 'POST':
        return HttpResponse("404 - page forbidden")
    LoginPass1 = request.POST['LoginPass1']
    LoginUsername = request.POST['LoginUsername']
    user = authenticate(username=LoginUsername, password=LoginPass1)
    if user is not None:
        login(request, user)
        messages.success(request, "success")
        return HttpResponseRedirect('todo/')
    # Fix of the truncated "check your" message.
    messages.error(request, "Invalid username or password")
    return redirect('home')
# Create your views here.
| 30.548387 | 71 | 0.628828 |
from django.shortcuts import render,redirect
from django.http import HttpResponse , HttpResponseRedirect
from django.contrib.auth.models import User , auth
from django.contrib.auth import authenticate , login , logout
from django.contrib import messages
def home(request):
    """Render the landing page."""
    return render(request,'home.html')
def handlesignup(request):
    """Register a new user from the signup form.

    Validates that the username and email are unused and that both
    password fields match; on success creates the user and redirects to
    the todo page. Non-POST requests are rejected.
    """
    if request.method != 'POST':
        return HttpResponse("404 - page forbidden")
    Username = request.POST['Username']
    Email = request.POST['Email']
    Pass1 = request.POST['Pass1']
    Pass2 = request.POST['Pass2']
    if User.objects.filter(username=Username).exists():
        messages.error(request, "Username Taken")
        return redirect('home')
    if User.objects.filter(email=Email).exists():
        messages.error(request, "Email taken")
        return redirect('home')
    if Pass1 != Pass2:
        messages.error(request, "Passwords do not match")
        # Bug fix: this branch previously fell through without returning a
        # response, so Django raised "view didn't return an HttpResponse".
        return redirect('home')
    myuser = User.objects.create_user(Username, Email, Pass1)
    myuser.save()
    messages.success(request, "Success")
    return HttpResponseRedirect('todo/')
def handlelogout(request):
    """Log the current user out and return to the landing page."""
    logout(request)
    messages.success(request, "sucesssfully logged out")
    return redirect('home')
def handlelogin(request):
    """Authenticate a user from the login form.

    On success logs the user in and redirects to the todo page; on bad
    credentials flashes an error and returns to the landing page.
    Non-POST requests are rejected (bug fix: previously returned None,
    which makes Django raise "view didn't return an HttpResponse").
    """
    if request.method != 'POST':
        return HttpResponse("404 - page forbidden")
    LoginPass1 = request.POST['LoginPass1']
    LoginUsername = request.POST['LoginUsername']
    user = authenticate(username=LoginUsername, password=LoginPass1)
    if user is not None:
        login(request, user)
        messages.success(request, "success")
        return HttpResponseRedirect('todo/')
    # Fix of the truncated "check your" message.
    messages.error(request, "Invalid username or password")
    return redirect('home')
| true | true |
f73381290c29669886f42993b098e5b1d70cdb2c | 1,834 | py | Python | xor_gate_nn/datasets/keras_fn/datasets.py | AI-Huang/XOR_Gate_NN | d97c7fd7e5b046e84bd862081ab800b9ccbb1672 | [
"MIT"
] | null | null | null | xor_gate_nn/datasets/keras_fn/datasets.py | AI-Huang/XOR_Gate_NN | d97c7fd7e5b046e84bd862081ab800b9ccbb1672 | [
"MIT"
] | null | null | null | xor_gate_nn/datasets/keras_fn/datasets.py | AI-Huang/XOR_Gate_NN | d97c7fd7e5b046e84bd862081ab800b9ccbb1672 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-09-21 22:23
# @Author : Kelly Hwong (dianhuangkan@gmail.com)
import numpy as np
import tensorflow as tf
class XOR_Dataset(tf.keras.utils.Sequence):
    """Keras ``Sequence`` over the 4-sample XOR truth table.

    Serves ``(batch_x, batch_y)`` pairs drawn from the XOR inputs
    ``[[0,0],[0,1],[1,0],[1,1]]`` and labels ``[0,1,1,0]``.
    """

    def __init__(
        self,
        batch_size=1,
        shuffle=False,
        seed=42,
    ):
        """
        Args:
            batch_size: samples per batch; must be <= 4.
            shuffle: if True, shuffle the sample order once using ``seed``.
                (Bug fix: both arguments were previously accepted but the
                seed was dropped and the order was never shuffled.)
            seed: RNG seed used when ``shuffle`` is True.
        """
        self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        self.y = np.array([[0], [1], [1], [0]])
        assert batch_size <= 4
        self.batch_size = batch_size  # one by one learning by default
        # Store shuffle/seed *before* building the index so _set_index_array
        # can honor them.
        self.shuffle = shuffle
        self.seed = seed
        self.index = self._set_index_array()

    def __getitem__(self, batch_index):
        """Gets batch at `batch_index`.

        Arguments:
            batch_index: index of the batch in the Sequence.

        Returns:
            batch_x, batch_y: a batch of sequence data.
        """
        batch_size = self.batch_size
        sample_index = \
            self.index[batch_index * batch_size:(batch_index + 1) * batch_size]
        batch_x = np.empty((batch_size, 2))
        batch_y = np.empty(batch_size)
        for slot, i in enumerate(sample_index):
            batch_x[slot, :] = self.X[i, :]
            batch_y[slot] = self.y[i, :]
        return batch_x, batch_y

    def __len__(self):
        """Number of batches in the Sequence."""
        return int(np.ceil(self.index.shape[0] / self.batch_size))

    def __iter__(self):
        """Create a generator that iterates over the Sequence."""
        for item in (self[i] for i in range(len(self))):
            yield item

    def _set_index_array(self):
        """Build the sample-index order; shuffled deterministically when
        ``self.shuffle`` is True."""
        index = np.arange(0, 4)
        if self.shuffle:
            np.random.RandomState(self.seed).shuffle(index)
        return index
def main():
    # Placeholder entry point; no standalone behavior yet.
    pass
if __name__ == "__main__":
    main()
| 24.131579 | 77 | 0.558888 |
import numpy as np
import tensorflow as tf
class XOR_Dataset(tf.keras.utils.Sequence):
    """Keras ``Sequence`` over the 4-sample XOR truth table.

    Serves ``(batch_x, batch_y)`` pairs drawn from the XOR inputs
    ``[[0,0],[0,1],[1,0],[1,1]]`` and labels ``[0,1,1,0]``.
    """

    def __init__(
        self,
        batch_size=1,
        shuffle=False,
        seed=42,
    ):
        """
        Args:
            batch_size: samples per batch; must be <= 4.
            shuffle: if True, shuffle the sample order once using ``seed``.
                (Bug fix: both arguments were previously accepted but the
                seed was dropped and the order was never shuffled.)
            seed: RNG seed used when ``shuffle`` is True.
        """
        self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
        self.y = np.array([[0], [1], [1], [0]])
        assert batch_size <= 4
        self.batch_size = batch_size
        # Store shuffle/seed *before* building the index so _set_index_array
        # can honor them.
        self.shuffle = shuffle
        self.seed = seed
        self.index = self._set_index_array()

    def __getitem__(self, batch_index):
        """Gets batch at `batch_index` as (batch_x, batch_y)."""
        batch_size = self.batch_size
        sample_index = \
            self.index[batch_index * batch_size:(batch_index + 1) * batch_size]
        batch_x = np.empty((batch_size, 2))
        batch_y = np.empty(batch_size)
        for slot, i in enumerate(sample_index):
            batch_x[slot, :] = self.X[i, :]
            batch_y[slot] = self.y[i, :]
        return batch_x, batch_y

    def __len__(self):
        """Number of batches in the Sequence."""
        return int(np.ceil(self.index.shape[0] / self.batch_size))

    def __iter__(self):
        """Create a generator that iterates over the Sequence."""
        for item in (self[i] for i in range(len(self))):
            yield item

    def _set_index_array(self):
        """Build the sample-index order; shuffled deterministically when
        ``self.shuffle`` is True."""
        index = np.arange(0, 4)
        if self.shuffle:
            np.random.RandomState(self.seed).shuffle(index)
        return index
def main():
    # Placeholder entry point; no standalone behavior yet.
    pass
if __name__ == "__main__":
    main()
| true | true |
f73382abafc05d4a3cfa356e78df80f0a7b037f9 | 23,188 | py | Python | jax/experimental/loops.py | austinpeel/jax | 1e625dd3483fea07b65a7a6f701194e20f66cf45 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-11-17T13:36:58.000Z | 2020-11-17T13:36:58.000Z | jax/experimental/loops.py | hanxiao/jax | ca766caa02296023bd6714bb7fdba064a45e2258 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | jax/experimental/loops.py | hanxiao/jax | ca766caa02296023bd6714bb7fdba064a45e2258 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-07-17T18:17:31.000Z | 2020-07-17T18:17:31.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loops is an **experimental** module for syntactic sugar for loops and control-flow.
The current implementation should convert loops correctly to JAX internal
representation, and most transformations should work (see below), but we have
not yet fine-tuned the performance of the resulting XLA compilation!
By default, loops and control-flow in JAX are executed and inlined during tracing.
For example, in the following code the `for` loop is unrolled during JAX tracing::
arr = np.zeros(5)
for i in range(arr.shape[0]):
arr[i] += 2.
if i % 2 == 0:
arr[i] += 1.
In order to capture the structured control-flow one has to use the higher-order
JAX operations, which require you to express the body of the loops and
conditionals as functions, and the array updates using a functional style that
returns an updated array, e.g.::
arr = np.zeros(5)
def loop_body(i, acc_arr):
arr1 = ops.index_update(acc_arr, i, acc_arr[i] + 2.)
return lax.cond(i % 2 == 0,
arr1,
lambda arr1: ops.index_update(arr1, i, arr1[i] + 1),
arr1,
lambda arr1: arr1)
arr = lax.fori_loop(0, arr.shape[0], loop_body, arr)
The default notation quickly gets unreadable with deeper nested loops.
With the utilities in this module you can write loops and conditionals that
look closer to plain Python, as long as you keep the loop-carried state in a
special `loops.scope` object and use `for` loops over special
`scope.range` iterators::
from jax.experimental import loops
with loops.Scope() as s:
s.arr = np.zeros(5) # Create the mutable state of the loop as `scope` fields.
for i in s.range(s.arr.shape[0]):
s.arr = ops.index_update(s.arr, i, s.arr[i] + 2.)
for _ in s.cond_range(i % 2 == 0): # Conditionals as loops with 0 or 1 iterations
s.arr = ops.index_update(s.arr, i, s.arr[i] + 1.)
Loops constructed with `range` must have literal constant bounds. If you need
loops with dynamic bounds, you can use the more general `while_range` iterator.
However, in that case that `grad` transformation is not supported::
s.idx = start
for _ in s.while_range(lambda: s.idx < end):
s.idx += 1
Notes:
* Loops and conditionals to be functionalized can appear only inside scopes
constructed with `loops.Scope` and they must use one of the `Scope.range`
iterators. All other loops are unrolled during tracing, as usual in JAX.
* Only scope data (stored in fields of the scope object) is functionalized.
All other state, e.g., in other Python variables, will not be considered as
being part of the loop output. All references to the mutable state should be
through the scope: `s.arr`.
* Conceptually, this model is still "functional" in the sense that a loop over
a `Scope.range` behaves as a function whose input and output is the scope data.
* Scopes should be passed down to callees that need to use loop
functionalization, or they may be nested.
* The programming model is that the loop body over a `scope.range` is traced
only once, using abstract shape values, similar to how JAX traces function
bodies.
Restrictions:
* The tracing of the loop body should not exit prematurely with `return`,
`exception`, `break`. This would be detected and reported as errors when we
encounter unnested scopes.
* The loop index variable should not be used after the loop. Similarly, one
should not use outside the loop data computed in the loop body, except data
stored in fields of the scope object.
* No new mutable state can be created inside a loop to be functionalized.
All mutable state must be created outside all loops and conditionals.
* For a `while` loop, the conditional function is not allowed to modify the
scope state. This is a checked error. Also, for `while` loops the `grad`
transformation does not work. An alternative that allows `grad` is a bounded
loop (`range`).
Transformations:
* All transformations are supported, except `grad` is not supported for
`Scope.while_range` loops.
* `vmap` is very useful for such loops because it pushes more work into the
inner-loops, which should help performance for accelerators.
For usage example, see tests/loops_test.py.
"""
import copy
from functools import partial
import itertools
import numpy as np
import traceback
from typing import Any, List, cast
from jax import abstract_arrays
from jax import lax, core
from jax._src.lax import control_flow as lax_control_flow
from jax import tree_util
from jax import numpy as jnp
from jax.interpreters import partial_eval as pe
from jax.util import safe_map
from jax.config import config
class Scope(object):
  """A scope context manager to keep the state of loop bodies for functionalization.

  Usage::

    with Scope() as s:
      s.data = 0.
      for i in s.range(5):
        s.data += 1.
      return s.data

  """

  def __init__(self):
    self._mutable_state = {}  # state to be functionalized, indexed by name.
    self._active_ranges = []  # stack of active ranges, last one is the innermost.
    self._count_subtraces = 0  # How many net started subtraces, for error recovery

  def range(self, first, second=None, third=None):
    """Creates an iterator for bounded iterations to be functionalized.

    The body is converted to a `lax.scan`, for which all JAX transformations work.
    The `first`, `second`, and `third` arguments must be integer literals.

    Usage::

      range(5)  # start=0, end=5, step=1
      range(1, 5)  # start=1, end=5, step=1
      range(1, 5, 2)  # start=1, end=5, step=2

      s.out = 1.
      for i in scope.range(5):
        s.out += 1.
    """
    if third is not None:
      start = int(first)
      stop = int(second)
      step = int(third)
    else:
      step = 1
      if second is not None:
        start = int(first)
        stop = int(second)
      else:
        start = 0
        stop = int(first)
    return _BodyTracer(self, _BoundedLoopBuilder(start, stop, step))

  def cond_range(self, pred):
    """Creates a conditional iterator with 0 or 1 iterations based on the boolean.

    The body is converted to a `lax.cond`. All JAX transformations work.

    Usage::

      for _ in scope.cond_range(s.field < 0.):
        s.field = - s.field
    """
    # TODO: share these checks with lax_control_flow.cond
    if len(np.shape(pred)) != 0:
      raise TypeError(
          "Pred must be a scalar, got {} of shape {}.".format(pred, np.shape(pred)))

    try:
      pred_dtype = np.result_type(pred)
    except TypeError as err:
      msg = ("Pred type must be either boolean or number, got {}.")
      raise TypeError(msg.format(pred)) from err

    if pred_dtype.kind != 'b':
      if pred_dtype.kind in 'iuf':
        pred = pred != 0
      else:
        msg = ("Pred type must be either boolean or number, got {}.")
        raise TypeError(msg.format(pred_dtype))
    return _BodyTracer(self, _CondBuilder(pred))

  def while_range(self, cond_func):
    """Creates an iterator that continues as long as `cond_func` returns true.

    The body is converted to a `lax.while_loop`.
    The `grad` transformation does not work.

    Usage::

      for _ in scope.while_range(lambda: s.loss > 1.e-5):
        s.loss = loss(...)

    Args:
      cond_func: a lambda with no arguments, the condition for the "while".
    """
    return _BodyTracer(self, _WhileBuilder(cond_func))

  def _push_range(self, range_):
    for ar in self._active_ranges:
      if ar is range_:
        raise ValueError("Range is reused nested inside itself.")
    self._active_ranges.append(range_)

  def _pop_range(self, range_):
    if not (range_ is self._active_ranges[-1]):
      self._error_premature_exit_range()
    self._active_ranges.pop()

  def _error_premature_exit_range(self):
    """Raises error about premature exit from a range"""
    msg = "Some ranges have exited prematurely. The innermost such range is at\n{}"
    raise ValueError(msg.format(self._active_ranges[-1].location()))

  def __getattr__(self, key):
    """Accessor for scope data.

    Called only if the attribute is not found, which will happen when we read
    scope data that has been stored in self._mutable_state.
    """
    # Bug fix: use a membership test rather than `.get(key) is None`, so that
    # scope fields legitimately set to None remain readable instead of being
    # mistaken for "uninitialized".
    if key not in self._mutable_state:
      raise AttributeError(
          "Reading uninitialized data '{}' from the scope.".format(key))
    return self._mutable_state[key]

  def __setattr__(self, key, value):
    """Update scope data to be functionalized.

    Called for *all* attribute setting.
    """
    if key in ["_active_ranges", "_mutable_state", "_count_subtraces"]:
      object.__setattr__(self, key, value)
    else:
      if self._active_ranges and key not in self._mutable_state:
        raise ValueError(
            "New mutable state '{}' cannot be created inside a loop.".format(key))
      self._mutable_state[key] = value

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    try:
      if exc_type is None:
        if self._active_ranges:  # We have some ranges that we did not exit properly
          self._error_premature_exit_range()
        return True
      else:
        # The exception may come from inside one or more ranges. We let the current
        # exception propagate, assuming it terminates the tracing. If not, the
        # tracers may be left in an inconsistent state.
        return False  # re-raise
    finally:
      # Ensure we leave the global trace_state as we found it
      while self._count_subtraces > 0:
        self.end_subtrace()

  def start_subtrace(self):
    """Starts a nested trace, returns the Trace object."""
    # TODO: This follows the __enter__ part of core.new_main.
    if config.omnistaging_enabled:
      level = core.thread_local_state.trace_state.trace_stack.next_level()
      main = core.MainTrace(level, pe.JaxprTrace)
      core.thread_local_state.trace_state.trace_stack.push(main)
      self._count_subtraces += 1
      return pe.JaxprTrace(main, core.cur_sublevel())
    else:
      level = core.thread_local_state.trace_state.trace_stack.next_level(False)
      main = core.MainTrace(level, pe.JaxprTrace)
      core.thread_local_state.trace_state.trace_stack.push(main, False)
      self._count_subtraces += 1
      return pe.JaxprTrace(main, core.cur_sublevel())

  def end_subtrace(self):
    # TODO: This follows the __exit__ part of core.new_main
    if config.omnistaging_enabled:
      core.thread_local_state.trace_state.trace_stack.pop()
    else:
      core.thread_local_state.trace_state.trace_stack.pop(False)
    self._count_subtraces -= 1
class _BodyTracer(object):
  """Traces the body of the loop and builds a functional control-flow representation.

  This class is also an iterator, only the first iteration is traced.
  """

  def __init__(self, scope, loop_builder):
    """
    Params:
      scope: the current scope
      loop_builder: instance of _LoopBuilder
    """
    self.scope = scope
    self.loop_builder = loop_builder
    self.first_iteration = True  # If we are tracing the first iteration
    # Stack trace, without this line and the s.range function
    self.stack = traceback.StackSummary.from_list(
        cast(List[Any], traceback.extract_stack()[:-2]))
    # Next are state kept from the start of the first iteration to the end of the iteration.
    self.carried_state_initial = {}
    # The parameters that were created for state upon entering an arbitrary iteration.
    self.carried_state_vars = {}

    self.trace = None
    # List of scope fields carried through the loop
    self.carried_state_names = None
    self.init_tree = None  # The PyTreeDef corresponding to carried_state_names
    self.init_vals = None  # The values corresponding to self.init_tree

  def location(self):
    """A multiline string representing the source location of the range."""
    if self.stack is not None:
      return " ".join(self.stack.format())
    else:
      return ""

  def __iter__(self):
    """Called before starting the first iteration."""
    self.first_iteration = True  # In case we reuse the range
    return self

  def __next__(self):
    if self.first_iteration:
      self.first_iteration = False
      self.scope._push_range(self)
      self.start_tracing_body()
      return self._index_var
    else:
      self.end_tracing_body()
      self.scope._pop_range(self)
      raise StopIteration  # Trace only one iteration.

  def next(self):  # For PY2
    return self.__next__()

  def start_tracing_body(self):
    """Called upon starting the tracing of the loop body."""
    # Make a copy of the current value of the mutable state
    self.carried_state_initial = copy.copy(self.scope._mutable_state)
    # The entire state is carried.
    self.carried_state_names = sorted(self.scope._mutable_state.keys())

    # TODO: This is the first part of partial_eval.trace_to_subjaxpr. Share.
    self.trace = self.scope.start_subtrace()
    # Set the scope._mutable_state to new tracing variables.
    for key, initial in self.carried_state_initial.items():
      mt_aval = _BodyTracer.abstractify(initial)
      mt_pval = pe.PartialVal.unknown(mt_aval)
      mt_var = self.trace.new_arg(mt_pval)
      self.carried_state_vars[key] = mt_var
      self.scope._mutable_state[key] = mt_var

    # Also make a fresh tracer for the loop index variable.
    index_var_aval = _BodyTracer.abstractify(0)
    index_var_pval = pe.PartialVal.unknown(index_var_aval)
    self._index_var = self.trace.new_arg(index_var_pval)

  def end_tracing_body(self):
    """Called when we are done tracing one iteration of the body."""
    # We will turn the body of the loop into a function that takes some values
    # for the scope state (carried_state_names) and returns the values for the
    # same state fields after one execution of the body. For some of the ranges,
    # e.g., scope.range, the function will also take the index_var as last parameter.
    in_tracers = [self.carried_state_vars[ms] for ms in self.carried_state_names]
    if self.loop_builder.can_use_index_var():
      in_tracers += [self._index_var]

    # Make the jaxpr for the body of the loop
    # TODO: See which mutable state was changed in the one iteration.
    # For now, we assume all state changes.
    body_out_tracers = tuple([self.scope._mutable_state[ms]
                              for ms in self.carried_state_names])
    try:
      # If the body actually uses the index variable, and is not allowed to
      # (e.g., cond_range and while_range), then in_tracers will not contain
      # the tracer for the index_var, and trace_to_jaxpr_finalize will throw
      # an assertion error.
      body_closed_jaxpr, body_const_vals = _BodyTracer.trace_to_jaxpr_finalize(
          in_tracers=in_tracers,
          out_tracers=body_out_tracers,
          trace=self.trace)
    except core.UnexpectedTracerError as e:
      if "Tracer not among input tracers" in str(e):
        raise ValueError("Body of cond_range or while_range should not use the "
                         "index variable returned by iterator.") from e
      raise
    # End the subtrace for the loop body, before we trace the condition
    self.scope.end_subtrace()

    carried_init_val = tuple([self.carried_state_initial[ms]
                              for ms in self.carried_state_names])
    carried_init_vals, carried_tree = tree_util.tree_flatten(carried_init_val)

    carried_out_vals = self.loop_builder.build_output_vals(
        self.scope, self.carried_state_names, carried_tree,
        carried_init_vals, body_closed_jaxpr, body_const_vals)
    carried_mutable_state_unflattened = tree_util.tree_unflatten(carried_tree,
                                                                 carried_out_vals)

    # Update the mutable state with the values of the changed vars, after the loop.
    for ms, mv in zip(self.carried_state_names, carried_mutable_state_unflattened):
      self.scope._mutable_state[ms] = mv

  @staticmethod
  def abstractify(x):
    # Raise the concrete value to a shaped abstract value for tracing.
    return abstract_arrays.raise_to_shaped(core.get_aval(x))

  @staticmethod
  def trace_to_jaxpr_finalize(in_tracers, out_tracers, trace, instantiate=True):
    # TODO: This is the final part of the partial_eval.trace_to_subjaxpr. Share.
    instantiate = [instantiate] * len(out_tracers)
    out_tracers = safe_map(trace.full_raise, safe_map(core.full_lower, out_tracers))
    out_tracers = safe_map(partial(pe.instantiate_const_at, trace),
                           instantiate, out_tracers)
    jaxpr, consts, env = pe.tracers_to_jaxpr(in_tracers, out_tracers)
    assert not env  # TODO: this is from partial_eval.trace_to_jaxpr. Share.
    closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
    return closed_jaxpr, consts
class _LoopBuilder(object):
"""Abstract superclass for the loop builders"""
def can_use_index_var(self):
"""Whether this kind of loop can use the index var returned by the range iterator."""
raise NotImplementedError
def build_output_vals(self, scope, carried_state_names, carried_tree,
init_vals, body_closed_jaxpr, body_const_vals):
"""Builds the output values for the loop carried state.
Params:
scope: the current Scope object.
carried_state_names: the list of names of mutable state fields that is
carried through the body.
carried_tree: the PyTreeDef for the tuple of carried_state_names.
init_vals: the initial values on body entry corresponding to the init_tree.
body_closed_jaxpr: the Jaxpr for the body returning the new values of
carried_state_names.
body_const_vals: the constant values for the body.
Returns:
the output tracer corresponding to the lax primitive representing the loop.
"""
raise NotImplementedError
def __str__(self):
raise NotImplementedError
class _BoundedLoopBuilder(_LoopBuilder):
  """Lowers a bounded `scope.range` iteration to a `lax.scan`."""

  def __init__(self, start, stop, step):
    self.start = start
    self.stop = stop
    self.step = step
    self._index_var = None  # The parameter for the index variable

  def can_use_index_var(self):
    # The scanned index array is fed to the body, so the index is usable.
    return True

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    # The index sequence becomes the scanned-over input of the scan.
    arange_val = jnp.arange(self.start, stop=self.stop, step=self.step)
    nr_consts = len(body_const_vals)
    nr_carry = len(init_vals)
    scan_args = list(body_const_vals) + list(init_vals) + [arange_val]
    return lax_control_flow.scan_p.bind(
        *scan_args,
        reverse=False, length=arange_val.shape[0],
        jaxpr=body_closed_jaxpr,
        num_consts=nr_consts,
        num_carry=nr_carry,
        linear=(False,) * (nr_consts + nr_carry + 1),
        unroll=1)
class _CondBuilder(_LoopBuilder):
  """Builds a lax.cond operation."""

  def __init__(self, pred):
    # Branch index for cond_p: False -> 0 (pass-through), True -> 1 (traced body).
    self.index = lax.convert_element_type(pred, np.int32)

  def can_use_index_var(self):
    # A conditional has no meaningful iteration index.
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Lowers the traced body to a `lax.cond` with an identity false branch."""
    # Simulate a pass-through false branch
    in_vals, in_tree = tree_util.tree_flatten(
        (body_const_vals, tree_util.tree_unflatten(carried_tree, init_vals)))
    in_avals = safe_map(_BodyTracer.abstractify, in_vals)
    pass_through_closed_jaxpr, pass_through_const_vals, _ = (
        lax_control_flow._initial_style_jaxpr(
            lambda *args: args[1],
            in_tree,
            tuple(in_avals)))
    assert len(pass_through_const_vals) == 0
    args = list(itertools.chain(body_const_vals, init_vals))
    return lax_control_flow.cond_p.bind(
        self.index, *args,
        branches=(pass_through_closed_jaxpr, body_closed_jaxpr),
        linear=(False,) * len(args))
class _WhileBuilder(_LoopBuilder):
  """Builds a lax.while operation."""

  def __init__(self, cond_func):
    self.cond_func = cond_func  # Function with 0 arguments (can reference the scope)

  def can_use_index_var(self):
    # A while loop has no bounded iteration index to expose.
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Lowers the traced body and the condition function to `lax.while_loop`."""
    # Trace the conditional function. cond_func takes 0 arguments, but
    # for lax.while we need a conditional function that takes the
    # carried_state_names. _initial_style_jaxpr will start its own trace and
    # will create tracers for all the carried state. We must put these values
    # in the scope._mutable_state before we trace the conditional
    # function.
    def cond_func_wrapped(*args):
      assert len(args) == len(carried_state_names)
      for ms, init_ms in zip(carried_state_names, args):
        scope._mutable_state[ms] = init_ms
      res = self.cond_func()
      # Conditional function is not allowed to modify the scope state
      for ms, init_ms in zip(carried_state_names, args):
        if not (scope._mutable_state[ms] is init_ms):
          msg = "Conditional function modifies scope.{} field."
          raise ValueError(msg.format(ms))
      return res

    init_avals = safe_map(_BodyTracer.abstractify, init_vals)
    cond_jaxpr, cond_consts, cond_tree = (
        lax_control_flow._initial_style_jaxpr(cond_func_wrapped,
                                              carried_tree,
                                              tuple(init_avals)))
    # TODO: share these checks with lax_control_flow.while
    if not tree_util.treedef_is_leaf(cond_tree):
      msg = "cond_fun must return a boolean scalar, but got pytree {}."
      raise TypeError(msg.format(cond_tree))
    if cond_jaxpr.out_avals != [abstract_arrays.ShapedArray((), np.bool_)]:
      msg = "cond_fun must return a boolean scalar, but got output type(s) {}."
      raise TypeError(msg.format(cond_jaxpr.out_avals))

    return lax_control_flow.while_p.bind(*itertools.chain(cond_consts,
                                                          body_const_vals,
                                                          init_vals),
                                         cond_nconsts=len(cond_consts),
                                         cond_jaxpr=cond_jaxpr,
                                         body_nconsts=len(body_const_vals),
                                         body_jaxpr=body_closed_jaxpr)
| 40.256944 | 92 | 0.681732 |
import copy
from functools import partial
import itertools
import numpy as np
import traceback
from typing import Any, List, cast
from jax import abstract_arrays
from jax import lax, core
from jax._src.lax import control_flow as lax_control_flow
from jax import tree_util
from jax import numpy as jnp
from jax.interpreters import partial_eval as pe
from jax.util import safe_map
from jax.config import config
class Scope(object):
  """A scope context manager to keep the state of loop bodies for functionalization.

  Usage::

    with Scope() as s:
      s.data = 0.
      for i in s.range(5):
        s.data += 1.
      return s.data

  """

  def __init__(self):
    self._mutable_state = {}  # state to be functionalized, indexed by name.
    self._active_ranges = []  # stack of active ranges, last one is the innermost.
    self._count_subtraces = 0  # How many net started subtraces, for error recovery

  def range(self, first, second=None, third=None):
    """Creates an iterator for bounded iterations to be functionalized.

    The body is converted to a `lax.scan`, for which all JAX transformations work.
    The `first`, `second`, and `third` arguments must be integer literals.
    """
    if third is not None:
      start = int(first)
      stop = int(second)
      step = int(third)
    else:
      step = 1
      if second is not None:
        start = int(first)
        stop = int(second)
      else:
        start = 0
        stop = int(first)
    return _BodyTracer(self, _BoundedLoopBuilder(start, stop, step))

  def cond_range(self, pred):
    """Creates a conditional iterator with 0 or 1 iterations based on the boolean.

    The body is converted to a `lax.cond`. All JAX transformations work.
    """
    # TODO: share these checks with lax_control_flow.cond
    if len(np.shape(pred)) != 0:
      raise TypeError(
          "Pred must be a scalar, got {} of shape {}.".format(pred, np.shape(pred)))

    try:
      pred_dtype = np.result_type(pred)
    except TypeError as err:
      msg = ("Pred type must be either boolean or number, got {}.")
      raise TypeError(msg.format(pred)) from err

    if pred_dtype.kind != 'b':
      if pred_dtype.kind in 'iuf':
        pred = pred != 0
      else:
        msg = ("Pred type must be either boolean or number, got {}.")
        raise TypeError(msg.format(pred_dtype))
    return _BodyTracer(self, _CondBuilder(pred))

  def while_range(self, cond_func):
    """Creates an iterator that continues as long as `cond_func` returns true.

    The body is converted to a `lax.while_loop`; `grad` does not work.

    Args:
      cond_func: a lambda with no arguments, the condition for the "while".
    """
    return _BodyTracer(self, _WhileBuilder(cond_func))

  def _push_range(self, range_):
    for ar in self._active_ranges:
      if ar is range_:
        raise ValueError("Range is reused nested inside itself.")
    self._active_ranges.append(range_)

  def _pop_range(self, range_):
    if not (range_ is self._active_ranges[-1]):
      self._error_premature_exit_range()
    self._active_ranges.pop()

  def _error_premature_exit_range(self):
    """Raises error about premature exit from a range"""
    msg = "Some ranges have exited prematurely. The innermost such range is at\n{}"
    raise ValueError(msg.format(self._active_ranges[-1].location()))

  def __getattr__(self, key):
    """Accessor for scope data stored in self._mutable_state."""
    # Bug fix: use a membership test rather than `.get(key) is None`, so that
    # scope fields legitimately set to None remain readable instead of being
    # mistaken for "uninitialized".
    if key not in self._mutable_state:
      raise AttributeError(
          "Reading uninitialized data '{}' from the scope.".format(key))
    return self._mutable_state[key]

  def __setattr__(self, key, value):
    """Update scope data to be functionalized; called for *all* attribute sets."""
    if key in ["_active_ranges", "_mutable_state", "_count_subtraces"]:
      object.__setattr__(self, key, value)
    else:
      if self._active_ranges and key not in self._mutable_state:
        raise ValueError(
            "New mutable state '{}' cannot be created inside a loop.".format(key))
      self._mutable_state[key] = value

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    try:
      if exc_type is None:
        if self._active_ranges:  # Some ranges were not exited properly
          self._error_premature_exit_range()
        return True
      else:
        # Let an exception raised inside a range propagate.
        return False  # re-raise
    finally:
      # Ensure we leave the global trace_state as we found it
      while self._count_subtraces > 0:
        self.end_subtrace()

  def start_subtrace(self):
    """Starts a nested trace, returns the Trace object."""
    # TODO: This follows the __enter__ part of core.new_main.
    if config.omnistaging_enabled:
      level = core.thread_local_state.trace_state.trace_stack.next_level()
      main = core.MainTrace(level, pe.JaxprTrace)
      core.thread_local_state.trace_state.trace_stack.push(main)
      self._count_subtraces += 1
      return pe.JaxprTrace(main, core.cur_sublevel())
    else:
      level = core.thread_local_state.trace_state.trace_stack.next_level(False)
      main = core.MainTrace(level, pe.JaxprTrace)
      core.thread_local_state.trace_state.trace_stack.push(main, False)
      self._count_subtraces += 1
      return pe.JaxprTrace(main, core.cur_sublevel())

  def end_subtrace(self):
    # TODO: This follows the __exit__ part of core.new_main
    if config.omnistaging_enabled:
      core.thread_local_state.trace_state.trace_stack.pop()
    else:
      core.thread_local_state.trace_state.trace_stack.pop(False)
    self._count_subtraces -= 1
class _BodyTracer(object):
  """Traces the body of the loop and builds a functional control-flow representation.

  This class is also an iterator; only the first iteration is traced.
  """

  def __init__(self, scope, loop_builder):
    """
    Params:
      scope: the current scope
      loop_builder: instance of _LoopBuilder
    """
    self.scope = scope
    self.loop_builder = loop_builder
    self.first_iteration = True  # If we are tracing the first iteration
    # Stack trace, without this line and the s.range function
    self.stack = traceback.StackSummary.from_list(
        cast(List[Any], traceback.extract_stack()[:-2]))
    # State kept from the start of the first iteration to its end.
    self.carried_state_initial = {}
    # The parameters created for state upon entering an arbitrary iteration.
    self.carried_state_vars = {}

    self.trace = None
    # List of scope fields carried through the loop
    self.carried_state_names = None
    self.init_tree = None  # The PyTreeDef corresponding to carried_state_names
    self.init_vals = None  # The values corresponding to self.init_tree

  def location(self):
    """A multiline string representing the source location of the range."""
    if self.stack is not None:
      return " ".join(self.stack.format())
    else:
      return ""

  def __iter__(self):
    """Called before starting the first iteration."""
    self.first_iteration = True  # In case we reuse the range
    return self

  def __next__(self):
    if self.first_iteration:
      self.first_iteration = False
      self.scope._push_range(self)
      self.start_tracing_body()
      return self._index_var
    else:
      self.end_tracing_body()
      self.scope._pop_range(self)
      raise StopIteration  # Trace only one iteration.

  def next(self):  # For PY2
    return self.__next__()

  def start_tracing_body(self):
    """Called upon starting the tracing of the loop body."""
    # Make a copy of the current value of the mutable state
    self.carried_state_initial = copy.copy(self.scope._mutable_state)
    # The entire state is carried.
    self.carried_state_names = sorted(self.scope._mutable_state.keys())

    # TODO: This is the first part of partial_eval.trace_to_subjaxpr. Share.
    self.trace = self.scope.start_subtrace()
    # Set the scope._mutable_state to new tracing variables.
    for key, initial in self.carried_state_initial.items():
      mt_aval = _BodyTracer.abstractify(initial)
      mt_pval = pe.PartialVal.unknown(mt_aval)
      mt_var = self.trace.new_arg(mt_pval)
      self.carried_state_vars[key] = mt_var
      self.scope._mutable_state[key] = mt_var

    # Also make a fresh tracer for the loop index variable.
    index_var_aval = _BodyTracer.abstractify(0)
    index_var_pval = pe.PartialVal.unknown(index_var_aval)
    self._index_var = self.trace.new_arg(index_var_pval)

  def end_tracing_body(self):
    """Called when we are done tracing one iteration of the body."""
    # Turn the body into a function of the carried state (plus, for bounded
    # ranges, the index variable) returning the new carried state.
    in_tracers = [self.carried_state_vars[ms] for ms in self.carried_state_names]
    if self.loop_builder.can_use_index_var():
      in_tracers += [self._index_var]

    # Make the jaxpr for the body of the loop; assume all state changes.
    body_out_tracers = tuple([self.scope._mutable_state[ms]
                              for ms in self.carried_state_names])
    try:
      # If the body uses the index variable but is not allowed to (cond_range,
      # while_range), trace_to_jaxpr_finalize raises an UnexpectedTracerError.
      body_closed_jaxpr, body_const_vals = _BodyTracer.trace_to_jaxpr_finalize(
          in_tracers=in_tracers,
          out_tracers=body_out_tracers,
          trace=self.trace)
    except core.UnexpectedTracerError as e:
      if "Tracer not among input tracers" in str(e):
        raise ValueError("Body of cond_range or while_range should not use the "
                         "index variable returned by iterator.") from e
      raise
    # End the subtrace for the loop body, before we trace the condition
    self.scope.end_subtrace()

    carried_init_val = tuple([self.carried_state_initial[ms]
                              for ms in self.carried_state_names])
    carried_init_vals, carried_tree = tree_util.tree_flatten(carried_init_val)

    carried_out_vals = self.loop_builder.build_output_vals(
        self.scope, self.carried_state_names, carried_tree,
        carried_init_vals, body_closed_jaxpr, body_const_vals)
    carried_mutable_state_unflattened = tree_util.tree_unflatten(carried_tree,
                                                                 carried_out_vals)

    # Update the mutable state with the values of the changed vars, after the loop.
    for ms, mv in zip(self.carried_state_names, carried_mutable_state_unflattened):
      self.scope._mutable_state[ms] = mv

  @staticmethod
  def abstractify(x):
    # Raise the concrete value to a shaped abstract value for tracing.
    return abstract_arrays.raise_to_shaped(core.get_aval(x))

  @staticmethod
  def trace_to_jaxpr_finalize(in_tracers, out_tracers, trace, instantiate=True):
    # TODO: This is the final part of the partial_eval.trace_to_subjaxpr. Share.
    instantiate = [instantiate] * len(out_tracers)
    out_tracers = safe_map(trace.full_raise, safe_map(core.full_lower, out_tracers))
    out_tracers = safe_map(partial(pe.instantiate_const_at, trace),
                           instantiate, out_tracers)
    jaxpr, consts, env = pe.tracers_to_jaxpr(in_tracers, out_tracers)
    assert not env  # TODO: this is from partial_eval.trace_to_jaxpr. Share.
    closed_jaxpr = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
    return closed_jaxpr, consts
class _LoopBuilder(object):
  """Abstract interface for building a loop primitive (scan/cond/while)
  from a traced loop body."""

  def can_use_index_var(self):
    """Whether the loop body may read the loop index variable."""
    raise NotImplementedError

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Builds the loop's flat output values from the traced body jaxpr."""
    raise NotImplementedError

  def __str__(self):
    raise NotImplementedError
class _BoundedLoopBuilder(_LoopBuilder):
  """Builds a lax scan over jnp.arange(start, stop, step)."""

  def __init__(self, start, stop, step):
    # Range parameters, with the same meaning as builtins.range.
    self.start = start
    self.stop = stop
    self.step = step
    # NOTE(review): _index_var appears unused in this class — the
    # per-iteration index comes from the scanned arange below.
    self._index_var = None

  def can_use_index_var(self):
    # The scanned arange supplies a per-iteration index value.
    return True

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    """Binds scan_p over (consts, carried state, index sequence)."""
    arange_val = jnp.arange(self.start, stop=self.stop, step=self.step)
    return lax_control_flow.scan_p.bind(*itertools.chain(body_const_vals,
                                                         init_vals, [arange_val]),
                                        reverse=False, length=arange_val.shape[0],
                                        jaxpr=body_closed_jaxpr,
                                        num_consts=len(body_const_vals),
                                        num_carry=len(init_vals),
                                        # nothing is marked linear; the +1
                                        # accounts for the scanned arange
                                        linear=(False,) * (len(body_const_vals) +
                                                           len(init_vals) + 1),
                                        unroll=1)
class _CondBuilder(_LoopBuilder):
  """Builds a lax cond: branch 1 runs the body, branch 0 passes through."""

  def __init__(self, pred):
    # cond_p dispatches on an int32 branch index: False -> 0, True -> 1.
    self.index = lax.convert_element_type(pred, np.int32)

  def can_use_index_var(self):
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    # Trace an identity ("pass-through") jaxpr for the false branch: its
    # function ignores the consts (args[0]) and returns the carried
    # state (args[1]) unchanged.
    in_vals, in_tree = tree_util.tree_flatten(
        (body_const_vals, tree_util.tree_unflatten(carried_tree, init_vals)))
    in_avals = safe_map(_BodyTracer.abstractify, in_vals)
    pass_through_closed_jaxpr, pass_through_const_vals, _ = (
        lax_control_flow._initial_style_jaxpr(
            lambda *args: args[1],
            in_tree,
            tuple(in_avals)))
    # An identity function should not have hoisted any constants.
    assert len(pass_through_const_vals) == 0
    args = list(itertools.chain(body_const_vals, init_vals))
    return lax_control_flow.cond_p.bind(
        self.index, *args,
        branches=(pass_through_closed_jaxpr, body_closed_jaxpr),
        linear=(False,) * len(args))
class _WhileBuilder(_LoopBuilder):
  """Builds a lax while-loop whose condition is a traced Python callable."""

  def __init__(self, cond_func):
    # cond_func is called with no arguments and reads the scope's
    # mutable state (see cond_func_wrapped below).
    self.cond_func = cond_func

  def can_use_index_var(self):
    return False

  def build_output_vals(self, scope, carried_state_names, carried_tree,
                        init_vals, body_closed_jaxpr, body_const_vals):
    # Wrap the user condition so it can be traced as a function of the
    # carried state: temporarily install the argument tracers into the
    # scope, call the condition, and verify it did not mutate the state.
    def cond_func_wrapped(*args):
      assert len(args) == len(carried_state_names)
      for ms, init_ms in zip(carried_state_names, args):
        scope._mutable_state[ms] = init_ms
      res = self.cond_func()
      # The condition must be read-only with respect to the scope state.
      for ms, init_ms in zip(carried_state_names, args):
        if not (scope._mutable_state[ms] is init_ms):
          msg = "Conditional function modifies scope.{} field."
          raise ValueError(msg.format(ms))
      return res

    init_avals = safe_map(_BodyTracer.abstractify, init_vals)
    cond_jaxpr, cond_consts, cond_tree = (
        lax_control_flow._initial_style_jaxpr(cond_func_wrapped,
                                              carried_tree,
                                              tuple(init_avals)))
    # The condition must evaluate to a single boolean scalar.
    if not tree_util.treedef_is_leaf(cond_tree):
      msg = "cond_fun must return a boolean scalar, but got pytree {}."
      raise TypeError(msg.format(cond_tree))
    if cond_jaxpr.out_avals != [abstract_arrays.ShapedArray((), np.bool_)]:
      msg = "cond_fun must return a boolean scalar, but got output type(s) {}."
      raise TypeError(msg.format(cond_jaxpr.out_avals))
    return lax_control_flow.while_p.bind(*itertools.chain(cond_consts,
                                                          body_const_vals,
                                                          init_vals),
                                         cond_nconsts=len(cond_consts),
                                         cond_jaxpr=cond_jaxpr,
                                         body_nconsts=len(body_const_vals),
                                         body_jaxpr=body_closed_jaxpr)
| true | true |
f733841869f54feb20f297d8835240a7d14283ae | 6,767 | py | Python | python/ccxt/paymium.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | null | null | null | python/ccxt/paymium.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | null | null | null | python/ccxt/paymium.py | born2net/ccxt | 9995e50ca28513b9a68f774a3517f2c396cc0001 | [
"MIT"
] | 1 | 2018-08-09T18:11:13.000Z | 2018-08-09T18:11:13.000Z | # -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class paymium (Exchange):
    """Paymium (BTC/EUR) exchange adapter for ccxt.

    Public endpoints serve market data; private endpoints are
    authenticated by signing nonce + url + body (see `sign`).
    """

    def describe(self):
        # Static exchange metadata: identifiers, URLs, the implicit REST
        # API endpoint lists, and the single supported market.
        return self.deep_extend(super(paymium, self).describe(), {
            'id': 'paymium',
            'name': 'Paymium',
            'countries': ['FR', 'EU'],
            'rateLimit': 2000,
            'version': 'v1',
            'hasCORS': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27790564-a945a9d4-5ff9-11e7-9d2d-b635763f2f24.jpg',
                'api': 'https://paymium.com/api',
                'www': 'https://www.paymium.com',
                'doc': [
                    'https://github.com/Paymium/api-documentation',
                    'https://www.paymium.com/page/developers',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        'countries',
                        'data/{id}/ticker',
                        'data/{id}/trades',
                        'data/{id}/depth',
                        'bitcoin_charts/{id}/trades',
                        'bitcoin_charts/{id}/depth',
                    ],
                },
                'private': {
                    'get': [
                        'merchant/get_payment/{UUID}',
                        'user',
                        'user/addresses',
                        'user/addresses/{btc_address}',
                        'user/orders',
                        'user/orders/{UUID}',
                        'user/price_alerts',
                    ],
                    'post': [
                        'user/orders',
                        'user/addresses',
                        'user/payment_requests',
                        'user/price_alerts',
                        'merchant/create_payment',
                    ],
                    'delete': [
                        'user/orders/{UUID}/cancel',
                        'user/price_alerts/{id}',
                    ],
                },
            },
            'markets': {
                'BTC/EUR': {'id': 'eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
            },
        })

    def fetch_balance(self, params={}):
        """Fetches account balances.

        The API reports per-currency 'balance_<cur>' (free) and
        'locked_<cur>' (used) fields; total = free + used.
        """
        balances = self.privateGetUser()
        result = {'info': balances}
        for c in range(0, len(self.currencies)):
            currency = self.currencies[c]
            lowercase = currency.lower()
            account = self.account()
            balance = 'balance_' + lowercase
            locked = 'locked_' + lowercase
            if balance in balances:
                account['free'] = balances[balance]
            if locked in balances:
                account['used'] = balances[locked]
            account['total'] = self.sum(account['free'], account['used'])
            result[currency] = account
        return self.parse_balance(result)

    def fetch_order_book(self, symbol, params={}):
        """Fetches the order book; bids are re-sorted best (highest) first."""
        orderbook = self.publicGetDataIdDepth(self.extend({
            'id': self.market_id(symbol),
        }, params))
        result = self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
        result['bids'] = self.sort_by(result['bids'], 0, True)
        return result

    def fetch_ticker(self, symbol, params={}):
        """Fetches the ticker; the API's 'at' field is unix seconds."""
        ticker = self.publicGetDataIdTicker(self.extend({
            'id': self.market_id(symbol),
        }, params))
        timestamp = ticker['at'] * 1000
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['high']),
            'low': float(ticker['low']),
            'bid': float(ticker['bid']),
            'ask': float(ticker['ask']),
            'vwap': float(ticker['vwap']),
            'open': float(ticker['open']),
            'close': None,
            'first': None,
            'last': float(ticker['price']),
            'change': None,
            'percentage': float(ticker['variation']),
            'average': None,
            'baseVolume': None,
            'quoteVolume': float(ticker['volume']),
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Converts one raw trade to the unified structure.

        The traded amount is reported in a per-currency field, e.g.
        'traded_btc' for a BTC-base market.
        """
        timestamp = int(trade['created_at_int']) * 1000
        volume = 'traded_' + market['base'].lower()
        return {
            'info': trade,
            'id': trade['uuid'],
            'order': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['side'],
            'price': trade['price'],
            'amount': trade[volume],
        }

    def fetch_trades(self, symbol, params={}):
        """Fetches recent public trades for the given symbol."""
        market = self.market(symbol)
        response = self.publicGetDataIdTrades(self.extend({
            'id': market['id'],
        }, params))
        return self.parse_trades(response, market)

    def create_order(self, market, type, side, amount, price=None, params={}):
        """Places a 'LimitOrder' or 'MarketOrder' via POST user/orders.

        Bug fix: the price must be attached to *limit* orders (market
        orders execute at the current price); the previous condition
        attached it to market orders instead, so limit orders were sent
        without a price.
        """
        order = {
            'type': self.capitalize(type) + 'Order',
            'currency': self.market_id(market),
            'direction': side,
            'amount': amount,
        }
        if type == 'limit':
            order['price'] = price
        response = self.privatePostUserOrders(self.extend(order, params))
        return {
            'info': response,
            'id': response['uuid'],
        }

    def cancel_order(self, id, symbol=None, params={}):
        """Cancels an order by its UUID.

        Bug fix: uses the DELETE user/orders/{UUID}/cancel endpoint that
        is actually declared in `describe` above; the previous code
        called a non-existent privatePostCancelOrder method.
        """
        return self.privateDeleteUserOrdersUUIDCancel(self.extend({
            'UUID': id,
        }, params))

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Builds the request; private calls are signed over nonce+url+body."""
        url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            # Private requests always carry a JSON body (possibly '{}').
            body = self.json(params)
            nonce = str(self.nonce())
            auth = nonce + url + body
            headers = {
                'Api-Key': self.apiKey,
                'Api-Signature': self.hmac(self.encode(auth), self.secret),
                'Api-Nonce': nonce,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Performs the request and raises ExchangeError on API errors."""
        response = self.fetch2(path, api, method, params, headers, body)
        if 'errors' in response:
            raise ExchangeError(self.id + ' ' + self.json(response))
        return response
| 37.181319 | 126 | 0.468745 |
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class paymium (Exchange):
def describe(self):
return self.deep_extend(super(paymium, self).describe(), {
'id': 'paymium',
'name': 'Paymium',
'countries': ['FR', 'EU'],
'rateLimit': 2000,
'version': 'v1',
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27790564-a945a9d4-5ff9-11e7-9d2d-b635763f2f24.jpg',
'api': 'https://paymium.com/api',
'www': 'https://www.paymium.com',
'doc': [
'https://github.com/Paymium/api-documentation',
'https://www.paymium.com/page/developers',
],
},
'api': {
'public': {
'get': [
'countries',
'data/{id}/ticker',
'data/{id}/trades',
'data/{id}/depth',
'bitcoin_charts/{id}/trades',
'bitcoin_charts/{id}/depth',
],
},
'private': {
'get': [
'merchant/get_payment/{UUID}',
'user',
'user/addresses',
'user/addresses/{btc_address}',
'user/orders',
'user/orders/{UUID}',
'user/price_alerts',
],
'post': [
'user/orders',
'user/addresses',
'user/payment_requests',
'user/price_alerts',
'merchant/create_payment',
],
'delete': [
'user/orders/{UUID}/cancel',
'user/price_alerts/{id}',
],
},
},
'markets': {
'BTC/EUR': {'id': 'eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
},
})
def fetch_balance(self, params={}):
balances = self.privateGetUser()
result = {'info': balances}
for c in range(0, len(self.currencies)):
currency = self.currencies[c]
lowercase = currency.lower()
account = self.account()
balance = 'balance_' + lowercase
locked = 'locked_' + lowercase
if balance in balances:
account['free'] = balances[balance]
if locked in balances:
account['used'] = balances[locked]
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
orderbook = self.publicGetDataIdDepth(self.extend({
'id': self.market_id(symbol),
}, params))
result = self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'amount')
result['bids'] = self.sort_by(result['bids'], 0, True)
return result
def fetch_ticker(self, symbol, params={}):
ticker = self.publicGetDataIdTicker(self.extend({
'id': self.market_id(symbol),
}, params))
timestamp = ticker['at'] * 1000
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['bid']),
'ask': float(ticker['ask']),
'vwap': float(ticker['vwap']),
'open': float(ticker['open']),
'close': None,
'first': None,
'last': float(ticker['price']),
'change': None,
'percentage': float(ticker['variation']),
'average': None,
'baseVolume': None,
'quoteVolume': float(ticker['volume']),
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = int(trade['created_at_int']) * 1000
volume = 'traded_' + market['base'].lower()
return {
'info': trade,
'id': trade['uuid'],
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['side'],
'price': trade['price'],
'amount': trade[volume],
}
def fetch_trades(self, symbol, params={}):
market = self.market(symbol)
response = self.publicGetDataIdTrades(self.extend({
'id': market['id'],
}, params))
return self.parse_trades(response, market)
def create_order(self, market, type, side, amount, price=None, params={}):
order = {
'type': self.capitalize(type) + 'Order',
'currency': self.market_id(market),
'direction': side,
'amount': amount,
}
if type == 'market':
order['price'] = price
response = self.privatePostUserOrders(self.extend(order, params))
return {
'info': response,
'id': response['uuid'],
}
def cancel_order(self, id, symbol=None, params={}):
return self.privatePostCancelOrder(self.extend({
'orderNumber': id,
}, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
body = self.json(params)
nonce = str(self.nonce())
auth = nonce + url + body
headers = {
'Api-Key': self.apiKey,
'Api-Signature': self.hmac(self.encode(auth), self.secret),
'Api-Nonce': nonce,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'errors' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| true | true |
f73384e456231ad7a7678d4ca1fe73ee0e67c76d | 15,512 | py | Python | 04dnn_hmm/02_train_dnn.py | ko-ya346/python_asr | 251d8a4ff810fbeb5f7b63229139944195ab7cb5 | [
"MIT"
] | null | null | null | 04dnn_hmm/02_train_dnn.py | ko-ya346/python_asr | 251d8a4ff810fbeb5f7b63229139944195ab7cb5 | [
"MIT"
] | null | null | null | 04dnn_hmm/02_train_dnn.py | ko-ya346/python_asr | 251d8a4ff810fbeb5f7b63229139944195ab7cb5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# DNNを学習します.
#
# Pytorchを用いた処理に必要なモジュールをインポート
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
# 作成したDatasetクラスをインポート
from my_dataset import SequenceDataset
# 数値演算用モジュール(numpy)をインポート
import numpy as np
# プロット用モジュール(matplotlib)をインポート
import matplotlib.pyplot as plt
# hmmfunc.pyからMonoPhoneHMMクラスをインポート
from hmmfunc import MonoPhoneHMM
# モデルの定義をインポート
from my_model import MyDNN
# json形式の入出力を行うモジュールをインポート
import json
# os, sys, shutilモジュールをインポート
import os
import sys
import shutil
#
# Main program: trains the DNN of a DNN-HMM hybrid acoustic model.
#
if __name__ == "__main__":
    #
    # Configuration starts here
    #
    # Feature list (scp) of the training data
    train_feat_scp = \
        '../01compute_features/mfcc/train_small/feats.scp'
    # Frame-level label (alignment) file of the training data
    train_label_file = \
        './exp/data/train_small/alignment'
    # Mean / standard deviation of the features,
    # computed on the training data
    mean_std_file = \
        '../01compute_features/mfcc/train_small/mean_std.txt'
    # Feature list (scp) of the development data
    dev_feat_scp = \
        '../01compute_features/mfcc/dev/feats.scp'
    # Label (alignment) file of the development data
    dev_label_file = \
        './exp/data/dev/alignment'
    # HMM file; it is used only to obtain the numbers of
    # phones and HMM states (which size the DNN output layer)
    hmm_file = '../03gmm_hmm/exp/model_3state_2mix/10.hmm'
    # Directory where the training results are written
    output_dir = os.path.join('exp', 'model_dnn')
    # Number of utterances per mini-batch
    batch_size = 5
    # Maximum number of epochs
    max_num_epoch = 60
    # Number of hidden layers
    num_layers = 4
    # Dimensionality of the hidden layers
    hidden_dim = 1024
    # splice: concatenate +/- n context frames;
    # the input size becomes (splice*2+1) times the feature size
    splice = 5
    # Initial learning rate
    initial_learning_rate = 0.008
    # Epoch from which learning-rate decay and early-stopping
    # checks begin (= training always runs at least this long,
    # however bad the validation results are)
    lr_decay_start_epoch = 7
    # Learning-rate decay factor
    # (new_lr <- current_lr * lr_decay_factor);
    # values of 1.0 or larger disable the decay
    lr_decay_factor = 0.5
    # Early-stopping patience: training is aborted when the best
    # validation loss has not improved for this many epochs
    early_stop_threshold = 3
    #
    # Configuration ends here
    #
    # Create the output directory if it does not exist yet
    os.makedirs(output_dir, exist_ok=True)
    # Collect the settings into a dictionary ...
    config = {'num_layers': num_layers,
              'hidden_dim': hidden_dim,
              'splice': splice,
              'batch_size': batch_size,
              'max_num_epoch': max_num_epoch,
              'initial_learning_rate': initial_learning_rate,
              'lr_decay_start_epoch': lr_decay_start_epoch,
              'lr_decay_factor': lr_decay_factor,
              'early_stop_threshold': early_stop_threshold}
    # ... and save them as JSON for later reference
    conf_file = os.path.join(output_dir, 'config.json')
    with open(conf_file, mode='w') as f:
        json.dump(config, f, indent=4)
    # Read the feature mean / standard-deviation file
    with open(mean_std_file, mode='r') as f:
        # Read all lines
        lines = f.readlines()
        # Line 1 (0-based) holds the mean vector,
        # line 3 the standard-deviation vector
        mean_line = lines[1]
        std_line = lines[3]
        # Split on whitespace ...
        feat_mean = mean_line.split()
        feat_std = std_line.split()
        # ... and convert to numpy arrays
        feat_mean = np.array(feat_mean,
                             dtype=np.float32)
        feat_std = np.array(feat_std,
                            dtype=np.float32)
    # Copy the mean/std file next to the model so that
    # decoding can reuse the same normalization
    shutil.copyfile(mean_std_file,
                    os.path.join(output_dir, 'mean_std.txt'))
    # Feature dimensionality
    feat_dim = np.size(feat_mean)
    # To size the DNN output layer we need the numbers of
    # phones and states from the HMM
    # Instantiate the MonoPhoneHMM class
    hmm = MonoPhoneHMM()
    # Load the HMM
    hmm.load_hmm(hmm_file)
    # The DNN output size is (number of phones) x (number of states)
    dim_out = hmm.num_phones * hmm.num_states
    # Padding value used for labels when batching; it must be
    # >= dim_out so it can never collide with a real class id
    pad_index = dim_out
    # Build the neural network model.
    # The input size is feat_dim * (2*splice+1) because of splicing.
    dim_in = feat_dim * (2*splice+1)
    model = MyDNN(dim_in=dim_in,
                  dim_hidden=hidden_dim,
                  dim_out=dim_out,
                  num_layers=num_layers)
    print(model)
    # Define the optimizer; momentum stochastic
    # gradient descent is used here
    optimizer = optim.SGD(model.parameters(),
                          lr=initial_learning_rate,
                          momentum=0.99)
    # Dataset for the training data
    # (padding_index is set to a value >= dim_out)
    train_dataset = SequenceDataset(train_feat_scp,
                                    train_label_file,
                                    feat_mean,
                                    feat_std,
                                    pad_index,
                                    splice)
    # Dataset for the development data
    dev_dataset = SequenceDataset(dev_feat_scp,
                                  dev_label_file,
                                  feat_mean,
                                  feat_std,
                                  pad_index,
                                  splice)
    # DataLoader for the training data; the training data
    # are shuffled.
    # (Larger num_workers speeds up loading but increases
    # CPU load; tune it to your machine.)
    train_loader = DataLoader(train_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)
    # DataLoader for the development data; no shuffling
    dev_loader = DataLoader(dev_dataset,
                            batch_size=batch_size,
                            shuffle=False,
                            num_workers=4)
    # Cross-entropy loss; padded frames are
    # excluded via ignore_index
    criterion = \
        nn.CrossEntropyLoss(ignore_index=pad_index)
    # Place the model parameters on the GPU when CUDA is
    # available, otherwise on the CPU
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    model = model.to(device)
    # Put the model into training mode
    model.train()
    # Keep both loaders in a dict so the train and validation
    # phases can share a single loop body below
    dataset_loader = {'train': train_loader,
                      'validation': dev_loader}
    # Per-epoch histories of the loss and the frame error rate
    loss_history = {'train': [],
                    'validation': []}
    error_history = {'train': [],
                     'validation': []}
    # This program keeps the model with the lowest validation
    # loss, so remember the best loss, model and epoch.
    # (NOTE(review): best_model is never assigned below — the best
    # state_dict is written straight to disk instead.)
    best_loss = -1
    best_model = None
    best_epoch = 0
    # Early-stopping flag; when True, training is aborted
    early_stop_flag = False
    # Early-stopping counter: number of consecutive epochs
    # without a new best validation loss
    counter_for_early_stop = 0
    # Prepare the log file
    log_file = open(os.path.join(output_dir,
                                 'log.txt'),
                    mode='w')
    log_file.write('epoch\ttrain loss\t'\
                   'train err\tvalid loss\tvalid err')
    # Loop over epochs
    for epoch in range(max_num_epoch):
        # Abort training when the early-stopping flag
        # has been raised
        if early_stop_flag:
            print(' Early stopping.'\
                  ' (early_stop_threshold = %d)' \
                  % (early_stop_threshold))
            log_file.write('\n Early stopping.'\
                           ' (early_stop_threshold = %d)' \
                           % (early_stop_threshold))
            break
        # Report the epoch number
        print('epoch %d/%d:' % (epoch+1, max_num_epoch))
        log_file.write('\n%d\t' % (epoch+1))
        # Alternate between the train and validation phases
        for phase in ['train', 'validation']:
            # Accumulated loss and number of processed
            # utterances in this epoch
            total_loss = 0
            total_utt = 0
            # Accumulated misclassified frames and total frames
            total_error = 0
            total_frames = 0
            # Take one mini-batch at a time from this phase's
            # DataLoader until all batches are processed.
            # A batch holds the features, labels, frame counts,
            # label lengths and utterance ids.
            for (features, labels, feat_len,
                 label_len, utt_ids) \
                    in dataset_loader[phase]:
                # Move the batch to the same device (GPU/CPU)
                # as the model
                features, labels = \
                    features.to(device), labels.to(device)
                # Reset the gradients
                optimizer.zero_grad()
                # Forward pass through the model
                outputs = model(features)
                # At this point outputs is a 3-D tensor
                # [batch, frames, classes]; CrossEntropyLoss
                # expects a 2-D [samples, classes] tensor, so
                # reshape it with view
                b_size, f_size, _ = outputs.size()
                outputs = outputs.view(b_size * f_size,
                                       dim_out)
                # labels is a 2-D [batch, frames] tensor;
                # CrossEntropyLoss expects a 1-D [samples]
                # tensor, so flatten it with view(-1)
                # (view(b_size*f_size) would work as well)
                labels = labels.view(-1)
                # Compute the loss
                loss = criterion(outputs, labels)
                # In the training phase, backpropagate and
                # update the model parameters
                if phase == 'train':
                    # Compute the gradients
                    loss.backward()
                    # Let the optimizer update the parameters
                    optimizer.step()
                # Accumulate the loss
                total_loss += loss.item()
                # Count the processed utterances
                total_utt += b_size
                #
                # Compute the frame-level error rate
                #
                # Predicted labels = argmax over classes
                _, hyp = torch.max(outputs, 1)
                # Remove the frames whose label is pad_index
                hyp = hyp[labels != pad_index]
                ref = labels[labels != pad_index]
                # Number of frames where prediction and
                # reference disagree
                error = (hyp != ref).sum()
                # Accumulate the error frames
                total_error += error
                # Accumulate the total frames
                total_frames += len(ref)
            #
            # One epoch of this phase is done: report the loss
            # and error rate, and possibly save the model
            #
            # Average the accumulated loss per utterance
            epoch_loss = total_loss / total_utt
            # Report to console and log file
            print(' %s loss: %f' \
                  % (phase, epoch_loss))
            log_file.write('%.6f\t' % (epoch_loss))
            # Append to the history
            loss_history[phase].append(epoch_loss)
            # Convert the accumulated error frames into an
            # error rate in percent
            epoch_error = 100.0 * total_error \
                        / total_frames
            # Report to console and log file
            print(' %s error rate: %f %%' \
                  % (phase, epoch_error))
            log_file.write('%.6f\t' % (epoch_error))
            # Append to the history
            error_history[phase].append(epoch_error)
            #
            # Validation-phase-only processing
            #
            if phase == 'validation':
                if epoch == 0 or best_loss > epoch_loss:
                    # New lowest validation loss:
                    # save the current model
                    best_loss = epoch_loss
                    torch.save(model.state_dict(),
                               output_dir+'/best_model.pt')
                    best_epoch = epoch
                    # Reset the early-stopping counter
                    counter_for_early_stop = 0
                else:
                    # The best loss was not improved, ...
                    if epoch+1 >= lr_decay_start_epoch:
                        # ... and we are past lr_decay_start_epoch
                        if counter_for_early_stop+1 \
                                >= early_stop_threshold:
                            # No improvement for the threshold
                            # number of epochs: raise the
                            # early-stopping flag
                            early_stop_flag = True
                        else:
                            # Early-stopping condition not met:
                            # decay the learning rate and
                            # continue training
                            if lr_decay_factor < 1.0:
                                for i, param_group \
                                        in enumerate(\
                                        optimizer.param_groups):
                                    if i == 0:
                                        lr = param_group['lr']
                                        dlr = lr_decay_factor \
                                            * lr
                                        print(' (Decay '\
                                              'learning rate:'\
                                              ' %f -> %f)' \
                                              % (lr, dlr))
                                        log_file.write(\
                                            '(Decay learning'\
                                            ' rate: %f -> %f)'\
                                            % (lr, dlr))
                                    param_group['lr'] = dlr
                            # Increase the early-stopping counter
                            counter_for_early_stop += 1
    #
    # All epochs finished: save the trained model and
    # write the summary to the log
    #
    print('---------------Summary'\
          '------------------')
    log_file.write('\n---------------Summary'\
                   '------------------\n')
    # Save the model of the final epoch
    torch.save(model.state_dict(),
               os.path.join(output_dir,'final_model.pt'))
    print('Final epoch model -> %s/final_model.pt' \
          % (output_dir))
    log_file.write('Final epoch model ->'\
                   ' %s/final_model.pt\n' \
                   % (output_dir))
    # Final-epoch statistics
    for phase in ['train', 'validation']:
        # Loss of the final epoch
        print(' %s loss: %f' \
              % (phase, loss_history[phase][-1]))
        log_file.write(' %s loss: %f\n' \
                       % (phase, loss_history[phase][-1]))
        # Error rate of the final epoch
        print(' %s error rate: %f %%' \
              % (phase, error_history[phase][-1]))
        log_file.write(' %s error rate: %f %%\n' \
                       % (phase, error_history[phase][-1]))
    # Best-epoch statistics
    # (the epoch with the lowest validation loss)
    print('Best epoch model (%d-th epoch)'\
          ' -> %s/best_model.pt' \
          % (best_epoch+1, output_dir))
    log_file.write('Best epoch model (%d-th epoch)'\
                   ' -> %s/best_model.pt\n' \
                   % (best_epoch+1, output_dir))
    for phase in ['train', 'validation']:
        # Loss of the best epoch
        print(' %s loss: %f' \
              % (phase, loss_history[phase][best_epoch]))
        log_file.write(' %s loss: %f\n' \
                       % (phase, loss_history[phase][best_epoch]))
        # Error rate of the best epoch
        print(' %s error rate: %f %%' \
              % (phase, error_history[phase][best_epoch]))
        log_file.write(' %s error rate: %f %%\n' \
                       % (phase, error_history[phase][best_epoch]))
    # Plot the loss history (learning curve) and save it
    fig1 = plt.figure()
    for phase in ['train', 'validation']:
        plt.plot(loss_history[phase],
                 label=phase+' loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    fig1.legend()
    fig1.savefig(output_dir+'/loss.png')
    # Plot the error-rate history and save it
    fig2 = plt.figure()
    for phase in ['train', 'validation']:
        plt.plot(error_history[phase],
                 label=phase+' error')
    plt.xlabel('Epoch')
    plt.ylabel('Error [%]')
    fig2.legend()
    fig2.savefig(output_dir+'/error.png')
    # Close the log file
    log_file.close()
| 30.356164 | 63 | 0.485237 |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch import optim
from my_dataset import SequenceDataset
import numpy as np
import matplotlib.pyplot as plt
from hmmfunc import MonoPhoneHMM
from my_model import MyDNN
import json
import os
import sys
import shutil
if __name__ == "__main__":
train_feat_scp = \
'../01compute_features/mfcc/train_small/feats.scp'
train_label_file = \
'./exp/data/train_small/alignment'
mean_std_file = \
'../01compute_features/mfcc/train_small/mean_std.txt'
dev_feat_scp = \
'../01compute_features/mfcc/dev/feats.scp'
dev_label_file = \
'./exp/data/dev/alignment'
hmm_file = '../03gmm_hmm/exp/model_3state_2mix/10.hmm'
output_dir = os.path.join('exp', 'model_dnn')
batch_size = 5
max_num_epoch = 60
num_layers = 4
hidden_dim = 1024
splice = 5
initial_learning_rate = 0.008
lr_decay_start_epoch = 7
lr_decay_factor = 0.5
early_stop_threshold = 3
os.makedirs(output_dir, exist_ok=True)
config = {'num_layers': num_layers,
'hidden_dim': hidden_dim,
'splice': splice,
'batch_size': batch_size,
'max_num_epoch': max_num_epoch,
'initial_learning_rate': initial_learning_rate,
'lr_decay_start_epoch': lr_decay_start_epoch,
'lr_decay_factor': lr_decay_factor,
'early_stop_threshold': early_stop_threshold}
conf_file = os.path.join(output_dir, 'config.json')
with open(conf_file, mode='w') as f:
json.dump(config, f, indent=4)
with open(mean_std_file, mode='r') as f:
lines = f.readlines()
mean_line = lines[1]
std_line = lines[3]
feat_mean = mean_line.split()
feat_std = std_line.split()
feat_mean = np.array(feat_mean,
dtype=np.float32)
feat_std = np.array(feat_std,
dtype=np.float32)
shutil.copyfile(mean_std_file,
os.path.join(output_dir, 'mean_std.txt'))
feat_dim = np.size(feat_mean)
hmm = MonoPhoneHMM()
hmm.load_hmm(hmm_file)
dim_out = hmm.num_phones * hmm.num_states
pad_index = dim_out
dim_in = feat_dim * (2*splice+1)
model = MyDNN(dim_in=dim_in,
dim_hidden=hidden_dim,
dim_out=dim_out,
num_layers=num_layers)
print(model)
optimizer = optim.SGD(model.parameters(),
lr=initial_learning_rate,
momentum=0.99)
train_dataset = SequenceDataset(train_feat_scp,
train_label_file,
feat_mean,
feat_std,
pad_index,
splice)
dev_dataset = SequenceDataset(dev_feat_scp,
dev_label_file,
feat_mean,
feat_std,
pad_index,
splice)
train_loader = DataLoader(train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=4)
dev_loader = DataLoader(dev_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=4)
criterion = \
nn.CrossEntropyLoss(ignore_index=pad_index)
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
model = model.to(device)
model.train()
dataset_loader = {'train': train_loader,
'validation': dev_loader}
loss_history = {'train': [],
'validation': []}
error_history = {'train': [],
'validation': []}
best_loss = -1
best_model = None
best_epoch = 0
early_stop_flag = False
counter_for_early_stop = 0
log_file = open(os.path.join(output_dir,
'log.txt'),
mode='w')
log_file.write('epoch\ttrain loss\t'\
'train err\tvalid loss\tvalid err')
for epoch in range(max_num_epoch):
if early_stop_flag:
print(' Early stopping.'\
' (early_stop_threshold = %d)' \
% (early_stop_threshold))
log_file.write('\n Early stopping.'\
' (early_stop_threshold = %d)' \
% (early_stop_threshold))
break
print('epoch %d/%d:' % (epoch+1, max_num_epoch))
log_file.write('\n%d\t' % (epoch+1))
for phase in ['train', 'validation']:
total_loss = 0
total_utt = 0
total_error = 0
total_frames = 0
for (features, labels, feat_len,
label_len, utt_ids) \
in dataset_loader[phase]:
features, labels = \
features.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(features)
b_size, f_size, _ = outputs.size()
outputs = outputs.view(b_size * f_size,
dim_out)
labels = labels.view(-1)
loss = criterion(outputs, labels)
if phase == 'train':
loss.backward()
optimizer.step()
total_loss += loss.item()
total_utt += b_size
_, hyp = torch.max(outputs, 1)
hyp = hyp[labels != pad_index]
ref = labels[labels != pad_index]
error = (hyp != ref).sum()
total_error += error
total_frames += len(ref)
epoch_loss = total_loss / total_utt
print(' %s loss: %f' \
% (phase, epoch_loss))
log_file.write('%.6f\t' % (epoch_loss))
loss_history[phase].append(epoch_loss)
epoch_error = 100.0 * total_error \
/ total_frames
print(' %s error rate: %f %%' \
% (phase, epoch_error))
log_file.write('%.6f\t' % (epoch_error))
error_history[phase].append(epoch_error)
if phase == 'validation':
if epoch == 0 or best_loss > epoch_loss:
best_loss = epoch_loss
torch.save(model.state_dict(),
output_dir+'/best_model.pt')
best_epoch = epoch
counter_for_early_stop = 0
else:
if epoch+1 >= lr_decay_start_epoch:
if counter_for_early_stop+1 \
>= early_stop_threshold:
early_stop_flag = True
else:
if lr_decay_factor < 1.0:
for i, param_group \
in enumerate(\
optimizer.param_groups):
if i == 0:
lr = param_group['lr']
dlr = lr_decay_factor \
* lr
print(' (Decay '\
'learning rate:'\
' %f -> %f)' \
% (lr, dlr))
log_file.write(\
'(Decay learning'\
' rate: %f -> %f)'\
% (lr, dlr))
param_group['lr'] = dlr
counter_for_early_stop += 1
print('---------------Summary'\
'------------------')
log_file.write('\n---------------Summary'\
'------------------\n')
torch.save(model.state_dict(),
os.path.join(output_dir,'final_model.pt'))
print('Final epoch model -> %s/final_model.pt' \
% (output_dir))
log_file.write('Final epoch model ->'\
' %s/final_model.pt\n' \
% (output_dir))
for phase in ['train', 'validation']:
print(' %s loss: %f' \
% (phase, loss_history[phase][-1]))
log_file.write(' %s loss: %f\n' \
% (phase, loss_history[phase][-1]))
print(' %s error rate: %f %%' \
% (phase, error_history[phase][-1]))
log_file.write(' %s error rate: %f %%\n' \
% (phase, error_history[phase][-1]))
print('Best epoch model (%d-th epoch)'\
' -> %s/best_model.pt' \
% (best_epoch+1, output_dir))
log_file.write('Best epoch model (%d-th epoch)'\
' -> %s/best_model.pt\n' \
% (best_epoch+1, output_dir))
for phase in ['train', 'validation']:
print(' %s loss: %f' \
% (phase, loss_history[phase][best_epoch]))
log_file.write(' %s loss: %f\n' \
% (phase, loss_history[phase][best_epoch]))
print(' %s error rate: %f %%' \
% (phase, error_history[phase][best_epoch]))
log_file.write(' %s error rate: %f %%\n' \
% (phase, error_history[phase][best_epoch]))
fig1 = plt.figure()
for phase in ['train', 'validation']:
plt.plot(loss_history[phase],
label=phase+' loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
fig1.legend()
fig1.savefig(output_dir+'/loss.png')
fig2 = plt.figure()
for phase in ['train', 'validation']:
plt.plot(error_history[phase],
label=phase+' error')
plt.xlabel('Epoch')
plt.ylabel('Error [%]')
fig2.legend()
fig2.savefig(output_dir+'/error.png')
log_file.close()
| true | true |
f73386d2c279e30707cf30922d7659e632ef1eb0 | 637 | py | Python | tests/v3_certificate_validation/test_unit_issuance_date.py | KhoiUna/cert-issuer | a51608a98033be4fca88df6d3708c98baba2907c | [
"MIT"
] | 356 | 2016-09-15T18:41:24.000Z | 2022-03-17T19:55:10.000Z | tests/v3_certificate_validation/test_unit_issuance_date.py | KhoiUna/cert-issuer | a51608a98033be4fca88df6d3708c98baba2907c | [
"MIT"
] | 118 | 2016-10-10T20:41:56.000Z | 2022-03-31T15:23:30.000Z | tests/v3_certificate_validation/test_unit_issuance_date.py | KhoiUna/cert-issuer | a51608a98033be4fca88df6d3708c98baba2907c | [
"MIT"
] | 205 | 2016-09-16T17:53:30.000Z | 2022-03-27T18:26:20.000Z | import unittest
from cert_issuer.models import validate_issuance_date
class UnitValidationV3 (unittest.TestCase):
def test_validate_issuance_date_invalid_RFC3339 (self):
candidate = '20200202'
try:
validate_issuance_date(candidate)
except:
assert True
return
assert False
def test_validate_issuance_date_valid_RFC3339 (self):
candidate = '2020-02-02T00:00:00Z'
try:
validate_issuance_date(candidate)
except:
assert False
return
assert True
if __name__ == '__main__':
unittest.main()
| 22.75 | 59 | 0.634223 | import unittest
from cert_issuer.models import validate_issuance_date
class UnitValidationV3 (unittest.TestCase):
def test_validate_issuance_date_invalid_RFC3339 (self):
candidate = '20200202'
try:
validate_issuance_date(candidate)
except:
assert True
return
assert False
def test_validate_issuance_date_valid_RFC3339 (self):
candidate = '2020-02-02T00:00:00Z'
try:
validate_issuance_date(candidate)
except:
assert False
return
assert True
if __name__ == '__main__':
unittest.main()
| true | true |
f733874f4cb4ca7c6f79942d29040359a26a6ba2 | 849 | py | Python | ObjectOrientedPython/StaticAndLocalVariables.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | 1 | 2020-03-01T17:24:20.000Z | 2020-03-01T17:24:20.000Z | ObjectOrientedPython/StaticAndLocalVariables.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | null | null | null | ObjectOrientedPython/StaticAndLocalVariables.py | dsabhrawal/python-examples | 55b3dd6c9fd0b992bcfe3422765dc80fb143a54b | [
"MIT"
] | null | null | null | # Static variables are class level variables
# Static variables are always referenced by class name
# Local variables are local to methods
class Student:
    """Teaching example contrasting static, instance, and local variables."""

    # Static (class-level) variable: one copy shared by every instance,
    # conventionally referenced through the class name.
    school = 'PQR'

    def __init__(self, name, roll, section):
        super().__init__()
        # Instance variables: one copy per object.
        self.name = name
        self.roll = roll
        self.section = section

    def display(self):
        # Local variable: exists only for the duration of this call.
        local_school = 'Local School'
        # print() with two arguments inserts a single space between them,
        # matching the original output byte for byte.
        for label, value in (
                ('Name of student: ', self.name),
                ('Roll No of student: ', self.roll),
                ('Section of Student: ', self.section),
                ('School of Student: ', Student.school),  # static, via class
                ('Local school value: ', local_school),
        ):
            print(label, value)
# Demo: build two students and print their fields. Student.school is shared
# by both; it could also be rebound for everyone via Student.school = '...'.
s1 = Student('Student A',101,'A')
s2 = Student('Student B',102,'B')
s1.display()
s2.display()
| 32.653846 | 71 | 0.654888 |
class Student:
school = 'PQR'
def __init__(self,name,roll,section):
super().__init__()
self.name = name
self.roll = roll
self.section = section
def display(self):
school = 'Local School'
print('Name of student: ',self.name)
print('Roll No of student: ',self.roll)
print('Section of Student: ',self.section)
print('School of Student: ', Student.school)
print('Local school value: ', school)
Student('Student B',102,'B')
s1.display()
s2.display()
| true | true |
f733887788b82f8be36163aa08a254e9e20ada0c | 3,592 | py | Python | certbot_plugin_gandi/gandi_api.py | treydock/certbot-plugin-gandi | 773e0da99b361305a32822dac124452d2f78b24d | [
"MIT"
] | null | null | null | certbot_plugin_gandi/gandi_api.py | treydock/certbot-plugin-gandi | 773e0da99b361305a32822dac124452d2f78b24d | [
"MIT"
] | null | null | null | certbot_plugin_gandi/gandi_api.py | treydock/certbot-plugin-gandi | 773e0da99b361305a32822dac124452d2f78b24d | [
"MIT"
] | null | null | null | import requests
import urllib
from collections import namedtuple
from certbot.plugins import dns_common
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
# Lightweight immutable records: API credentials and zone-lookup results.
_GandiConfig = namedtuple('_GandiConfig', ('api_key',))
_BaseDomain = namedtuple('_BaseDomain', ('zone_uuid', 'fqdn'))


def get_config(api_key):
    """Wrap *api_key* in the configuration record used by this module."""
    cfg = _GandiConfig(api_key=api_key)
    return cfg
def _get_json(response):
try:
data = response.json()
except ValueError:
return dict()
return data
def _get_response_message(response, default='<No reason given>'):
    """Extract the server-supplied 'message' field, or *default* if absent."""
    body = _get_json(response)
    return body.get('message', default)
def _headers(cfg):
return {
'Content-Type': 'application/json',
'X-Api-Key': cfg.api_key
}
def _get_url(*segs):
return 'https://dns.api.gandi.net/api/v5/{}'.format(
'/'.join(quote(seg, safe='') for seg in segs)
)
def _request(cfg, method, segs, **kw):
    # Issue an HTTP request to the Gandi LiveDNS API. *segs* are the raw URL
    # path segments (quoted by _get_url); **kw is forwarded unchanged to
    # requests.request (e.g. json=..., params=...). Returns the Response.
    headers = _headers(cfg)
    url = _get_url(*segs)
    return requests.request(method, url, headers=headers, **kw)
def _get_base_domain(cfg, domain):
    """Find the registered base domain responsible for *domain*.

    Walks certbot's base-domain guesses and returns a _BaseDomain for the
    first candidate the API recognises (with both a zone UUID and an
    FQDN), or None when no guess matches.
    """
    for guess in dns_common.base_domain_name_guesses(domain):
        response = _request(cfg, 'GET', ('domains', guess))
        if not response.ok:
            continue
        payload = _get_json(response)
        zone_uuid = payload.get('zone_uuid')
        fqdn = payload.get('fqdn')
        if zone_uuid and fqdn:
            return _BaseDomain(zone_uuid=zone_uuid, fqdn=fqdn)
    return None
def _get_relative_name(base_domain, name):
suffix = '.' + base_domain.fqdn
return name[:-len(suffix)] if name.endswith(suffix) else None
def _del_txt_record(cfg, base_domain, relative_name):
    # Delete the entire TXT record set at *relative_name* in the zone
    # identified by base_domain.zone_uuid; returns the requests Response.
    return _request(
        cfg,
        'DELETE',
        ('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
def _update_record(cfg, domain, name, request_runner):
    """Resolve *domain*/*name* to a zone record and run *request_runner*.

    Returns None on success, or a human-readable error string when the
    base domain or relative name cannot be derived, or when the API call
    fails (same contract as the public helpers below).
    """
    base_domain = _get_base_domain(cfg, domain)
    if base_domain is None:
        return 'Unable to get base domain for "{}"'.format(domain)
    relative_name = _get_relative_name(base_domain, name)
    if relative_name is None:
        return 'Unable to derive relative name for "{}"'.format(name)
    response = request_runner(base_domain, relative_name)
    if response.ok:
        return None
    return _get_response_message(response)
def get_txt_records(cfg, domain, name):
    """Fetch the TXT record values stored for *name* under *domain*.

    NOTE(review): resolution failures return an error *string* while an
    HTTP failure returns an empty list -- callers must cope with both.
    Behaviour intentionally kept as-is.
    """
    base_domain = _get_base_domain(cfg, domain)
    if base_domain is None:
        return 'Unable to get base domain for "{}"'.format(domain)
    relative_name = _get_relative_name(base_domain, name)
    if relative_name is None:
        return 'Unable to derive relative name for "{}"'.format(name)
    response = _request(
        cfg,
        'GET',
        ('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
    if not response.ok:
        return []
    return response.json().get('rrset_values')
def add_txt_record(cfg, domain, name, value):
    """Create the TXT record *name* = *value*, replacing any existing set.

    *value* may be a single string or a list of strings. Returns None on
    success or an error string (see _update_record).
    """
    def requester(base_domain, relative_name):
        # Remove any existing TXT record set first (original behaviour;
        # presumably the API would otherwise reject the POST -- TODO confirm).
        _del_txt_record(cfg, base_domain, relative_name)
        values = value if isinstance(value, list) else [value]
        return _request(
            cfg,
            'POST',
            ('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'),
            json={'rrset_values': values})
    return _update_record(cfg, domain, name, requester)
def del_txt_record(cfg, domain, name):
    """Delete the TXT record *name* under *domain*; None on success."""
    return _update_record(
        cfg, domain, name,
        lambda base_domain, relative_name: _del_txt_record(
            cfg, base_domain, relative_name))
| 28.0625 | 78 | 0.663419 | import requests
import urllib
from collections import namedtuple
from certbot.plugins import dns_common
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
_GandiConfig = namedtuple('_GandiConfig', ('api_key',))
_BaseDomain = namedtuple('_BaseDomain', ('zone_uuid', 'fqdn'))
def get_config(api_key):
return _GandiConfig(api_key=api_key)
def _get_json(response):
try:
data = response.json()
except ValueError:
return dict()
return data
def _get_response_message(response, default='<No reason given>'):
return _get_json(response).get('message', default)
def _headers(cfg):
return {
'Content-Type': 'application/json',
'X-Api-Key': cfg.api_key
}
def _get_url(*segs):
return 'https://dns.api.gandi.net/api/v5/{}'.format(
'/'.join(quote(seg, safe='') for seg in segs)
)
def _request(cfg, method, segs, **kw):
headers = _headers(cfg)
url = _get_url(*segs)
return requests.request(method, url, headers=headers, **kw)
def _get_base_domain(cfg, domain):
for candidate_base_domain in dns_common.base_domain_name_guesses(domain):
response = _request(cfg, 'GET', ('domains', candidate_base_domain))
if response.ok:
data = _get_json(response)
zone_uuid = data.get('zone_uuid')
fqdn = data.get('fqdn')
if zone_uuid and fqdn:
return _BaseDomain(zone_uuid=zone_uuid, fqdn=fqdn)
return None
def _get_relative_name(base_domain, name):
suffix = '.' + base_domain.fqdn
return name[:-len(suffix)] if name.endswith(suffix) else None
def _del_txt_record(cfg, base_domain, relative_name):
return _request(
cfg,
'DELETE',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
def _update_record(cfg, domain, name, request_runner):
base_domain = _get_base_domain(cfg, domain)
if base_domain is None:
return 'Unable to get base domain for "{}"'.format(domain)
relative_name = _get_relative_name(base_domain, name)
if relative_name is None:
return 'Unable to derive relative name for "{}"'.format(name)
response = request_runner(base_domain, relative_name)
return None if response.ok else _get_response_message(response)
def get_txt_records(cfg, domain, name):
base_domain = _get_base_domain(cfg, domain)
if base_domain is None:
return 'Unable to get base domain for "{}"'.format(domain)
relative_name = _get_relative_name(base_domain, name)
if relative_name is None:
return 'Unable to derive relative name for "{}"'.format(name)
response = _request(
cfg,
'GET',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'))
if response.ok:
return response.json().get('rrset_values')
else:
return []
def add_txt_record(cfg, domain, name, value):
def requester(base_domain, relative_name):
_del_txt_record(cfg, base_domain, relative_name)
return _request(
cfg,
'POST',
('zones', base_domain.zone_uuid, 'records', relative_name, 'TXT'),
json={
'rrset_values': value if isinstance(value, list) else [value]
})
return _update_record(cfg, domain, name, requester)
def del_txt_record(cfg, domain, name):
def requester(base_domain, relative_name):
return _del_txt_record(cfg, base_domain, relative_name)
return _update_record(cfg, domain, name, requester)
| true | true |
f73388a718dc5103b5644663388ec3af653ac9b9 | 1,701 | py | Python | app/core/migrations/0001_initial.py | LiMichael1/RecipeAPI | ef3bd0a1223277712cf5f6996f9f627c6e4c9339 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | LiMichael1/RecipeAPI | ef3bd0a1223277712cf5f6996f9f627c6e4c9339 | [
"MIT"
] | null | null | null | app/core/migrations/0001_initial.py | LiMichael1/RecipeAPI | ef3bd0a1223277712cf5f6996f9f627c6e4c9339 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-24 19:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for this app: creates a custom User model keyed by a
    # unique email address (no username field).

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Fields below mirror AbstractBaseUser/PermissionsMixin.
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is the unique login identifier for this model.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                # Standard permission-framework many-to-many relations.
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.029412 | 266 | 0.637272 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| true | true |
f7338924a05e595dd854c107878cfbdff8e4e5fe | 12,939 | py | Python | build/go/build.py | EnderNightLord-ChromeBook/zircon-rpi | b09b1eb3aa7a127c65568229fe10edd251869283 | [
"BSD-2-Clause"
] | 1 | 2020-12-29T17:07:06.000Z | 2020-12-29T17:07:06.000Z | build/go/build.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | null | null | null | build/go/build.py | DamieFC/fuchsia | f78a4a1326f4a4bb5834500918756173c01bab4f | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3.8
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Build script for a Go app.
import argparse
import os
import subprocess
import sys
import string
import shutil
import errno
from gen_library_metadata import get_sources
def main():
    """Build a Go binary or test executable, then post-process it.

    Sets up a per-target GOPATH from --go-dep-files, invokes `go build` /
    `go test -c` with a CGo-aware environment, optionally strips the
    output, extracts breakpad symbols, registers build IDs, and writes a
    ninja depfile. Returns the exit code to propagate to the caller.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--godepfile', help='Path to godepfile tool', required=True)
    parser.add_argument(
        '--root-out-dir', help='Path to root of build output', required=True)
    parser.add_argument(
        '--cc', help='The C compiler to use', required=False, default='cc')
    parser.add_argument(
        '--cxx', help='The C++ compiler to use', required=False, default='c++')
    parser.add_argument(
        '--dump-syms', help='The dump_syms tool to use', required=False)
    parser.add_argument(
        '--objcopy',
        help='The objcopy tool to use',
        required=False,
        default='objcopy')
    parser.add_argument('--sysroot', help='The sysroot to use', required=False)
    parser.add_argument(
        '--target', help='The compiler target to use', required=False)
    parser.add_argument(
        '--depfile', help='The path to the depfile', required=False)
    parser.add_argument(
        '--current-cpu',
        help='Target architecture.',
        choices=['x64', 'arm64'],
        required=True)
    parser.add_argument(
        '--current-os',
        help='Target operating system.',
        choices=['fuchsia', 'linux', 'mac', 'win'],
        required=True)
    parser.add_argument('--buildidtool', help='The path to the buildidtool.')
    parser.add_argument(
        '--build-id-dir', help='The path to the .build-id directory.')
    parser.add_argument(
        '--go-root', help='The go root to use for builds.', required=True)
    parser.add_argument(
        '--go-cache', help='Cache directory to use for builds.', required=False)
    parser.add_argument(
        '--is-test', help='True if the target is a go test', default=False)
    parser.add_argument('--buildmode', help='Build mode to use')
    parser.add_argument(
        '--gcflag',
        help='Arguments to pass to Go compiler',
        action='append',
        default=[])
    parser.add_argument(
        '--ldflag',
        help='Arguments to pass to Go linker',
        action='append',
        default=[])
    parser.add_argument(
        '--go-dep-files',
        help='List of files describing library dependencies',
        nargs='*',
        default=[])
    parser.add_argument(
        '--root-build-dir',
        help='Root build directory. Required if --go-dep-files is used.')
    parser.add_argument('--binname', help='Output file', required=True)
    parser.add_argument(
        '--output-path',
        help='Where to output the (unstripped) binary',
        required=True)
    parser.add_argument(
        '--stripped-output-path',
        help='Where to output a stripped binary, if supplied')
    parser.add_argument(
        '--verbose',
        help='Tell the go tool to be verbose about what it is doing',
        action='store_true')
    parser.add_argument('--package', help='The package name', required=True)
    parser.add_argument(
        '--include-dir',
        help='-isystem path to add',
        action='append',
        default=[])
    parser.add_argument(
        '--lib-dir', help='-L path to add', action='append', default=[])
    parser.add_argument('--vet', help='Run go vet', action='store_true')
    parser.add_argument(
        '--tag', help='Add a go build tag', default=[], action='append')
    parser.add_argument(
        '--cgo', help='Whether to enable CGo', action='store_true')
    args = parser.parse_args()

    # NOTE(review): --go-cache is declared optional above, but a missing
    # value makes os.makedirs(None) raise TypeError here; in practice the
    # build system always passes it.
    try:
        os.makedirs(args.go_cache)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(args.go_cache):
            pass
        else:
            raise

    goarch = {
        'x64': 'amd64',
        'arm64': 'arm64',
    }[args.current_cpu]
    goos = {
        'fuchsia': 'fuchsia',
        'linux': 'linux',
        'mac': 'darwin',
        'win': 'windows',
    }[args.current_os]

    # The distributed artifact: the stripped binary when one is produced,
    # otherwise the unstripped output.
    dist = args.stripped_output_path or args.output_path

    # Project path is a package specific gopath, also known as a "project" in go parlance.
    project_path = os.path.join(
        args.root_out_dir, 'gen', 'gopaths', args.binname)

    # Clean up any old project path to avoid leaking old dependencies.
    gopath_src = os.path.join(project_path, 'src')
    if os.path.exists(gopath_src):
        shutil.rmtree(gopath_src)
    os.makedirs(gopath_src)

    link_to_source_list = []
    if args.go_dep_files:
        assert args.root_build_dir, (
            '--root-build-dir is required with --go-dep-files')
        root_build_dir = os.path.abspath(args.root_build_dir)

        link_to_source = {}

        # Create a GOPATH for the packages dependency tree.
        for dst, src in sorted(get_sources(args.go_dep_files).items()):

            # Determine if the src path should
            # - be mapped as-is which, if src is a directory, includes all subdirectories
            # - have its contents enumerated and mapped directly
            map_directly = False

            if dst.endswith('/...'):
                # When a directory and all its subdirectories must be made available, map
                # the directory directly.
                map_directly = True

                # - src can have a '/...' suffix like with 'github.com/google/go-cmp/...'.
                # - This means all subpackages are being symlinked to the GOPATH.
                # - dst have the suffix when defining a package.
                # - src can only have the suffix if dst has it too.
                assert dst.endswith('/...') >= src.endswith('/...'), (dst, src)

                dst = dst[:-4]
                if src.endswith('/...'):
                    src = src[:-4]

            elif os.path.isfile(src):
                # When sources are explicitly listed in the BUILD.gn file, each `src` will
                # be a path to a file that must be mapped directly.
                map_directly = True

                # Paths with /.../ in the middle designate go packages that include
                # subpackages, but also explicitly list all their source files.
                # The construction of these paths is done in the
                # godepfile tool, so we remove these sentinel values here.
                dst = dst.replace('/.../', '/')

            dstdir = os.path.join(gopath_src, dst)

            if map_directly:
                # Make a symlink to the src directory or file.
                parent = os.path.dirname(dstdir)
                if not os.path.exists(parent):
                    os.makedirs(parent)
                os.symlink(src, dstdir)
                link_to_source[os.path.join(root_build_dir, dstdir)] = src
            else:
                # Map individual files since the dependency is only on the
                # package itself, not Go subpackages. The only exception is
                # 'testdata'.
                os.makedirs(dstdir)
                for filename in os.listdir(src):
                    src_file = os.path.join(src, filename)
                    if filename == 'testdata' or os.path.isfile(src_file):
                        os.symlink(src_file, os.path.join(dstdir, filename))
                        link_to_source[os.path.join(
                            root_build_dir, dstdir, filename)] = src

        # Create a sorted list of (link, src) pairs, with longest paths before
        # short one. This ensures that 'foobar' will appear before 'foo'.
        link_to_source_list = sorted(
            link_to_source.items(), key=lambda x: x[0], reverse=True)

    cflags = []
    if args.sysroot:
        cflags.extend(['--sysroot', args.sysroot])
    if args.target:
        cflags.extend(['-target', args.target])

    ldflags = cflags[:]
    if args.current_os == 'linux':
        ldflags.extend(
            [
                '-stdlib=libc++',
                # TODO(fxbug.dev/64336): the following flags are not recognized by CGo.
                # '-rtlib=compiler-rt',
                # '-unwindlib=',
            ])

    for dir in args.include_dir:
        cflags.extend(['-isystem', dir])
    ldflags.extend(['-L' + dir for dir in args.lib_dir])

    cflags_joined = ' '.join(cflags)
    ldflags_joined = ' '.join(ldflags)

    gopath = os.path.abspath(project_path)
    build_goroot = os.path.abspath(args.go_root)

    env = {
        # /usr/bin:/bin are required for basic things like bash(1) and env(1). Note
        # that on Mac, ld is also found from /usr/bin.
        'PATH': os.path.join(build_goroot, 'bin') + ':/usr/bin:/bin',
        # Disable modules to ensure Go doesn't try to download dependencies.
        'GO111MODULE': 'off',
        'GOARCH': goarch,
        'GOOS': goos,
        'GOPATH': gopath,
        # Some users have GOROOT set in their parent environment, which can break
        # things, so it is always set explicitly here.
        'GOROOT': build_goroot,
        'GOCACHE': args.go_cache,
        'CC': args.cc,
        'CXX': args.cxx,
        'CGO_CFLAGS': cflags_joined,
        'CGO_CPPFLAGS': cflags_joined,
        'CGO_CXXFLAGS': cflags_joined,
        'CGO_LDFLAGS': ldflags_joined,
    }

    # Infra sets $TMPDIR which is cleaned between builds.
    if os.getenv('TMPDIR'):
        env['TMPDIR'] = os.getenv('TMPDIR')

    if args.cgo:
        env['CGO_ENABLED'] = '1'
    if args.target:
        env['CC_FOR_TARGET'] = env['CC']
        env['CXX_FOR_TARGET'] = env['CXX']

    go_tool = os.path.join(build_goroot, 'bin', 'go')

    if args.vet:
        retcode = subprocess.call([go_tool, 'vet', args.package], env=env)
        if retcode != 0:
            return retcode

    cmd = [go_tool]

    if args.is_test:
        cmd += ['test', '-c']
    else:
        cmd += ['build', '-trimpath']
    if args.verbose:
        cmd += ['-x']
    if args.tag:
        # Separate tags by spaces. This behavior is actually deprecated in the
        # go command line, but Fuchsia currently has an older version of go
        # that hasn't switched to commas.
        cmd += ['-tags', ' '.join(args.tag)]
    if args.buildmode:
        cmd += ['-buildmode', args.buildmode]
    if args.gcflag:
        cmd += ['-gcflags', ' '.join(args.gcflag)]
    if args.ldflag:
        cmd += ['-ldflags=' + ' '.join(args.ldflag)]
    cmd += [
        '-pkgdir',
        os.path.join(project_path, 'pkg'),
        '-o',
        args.output_path,
        args.package,
    ]
    retcode = subprocess.call(cmd, env=env)

    if retcode == 0 and args.stripped_output_path:
        if args.current_os == 'mac':
            retcode = subprocess.call(
                [
                    'xcrun', 'strip', '-x', args.output_path, '-o',
                    args.stripped_output_path
                ],
                env=env)
        else:
            retcode = subprocess.call(
                [
                    args.objcopy, '--strip-sections', args.output_path,
                    args.stripped_output_path
                ],
                env=env)

    # TODO(fxbug.dev/27215): Also invoke the buildidtool in the case of linux
    # once buildidtool knows how to deal in Go's native build ID format.
    supports_build_id = args.current_os == 'fuchsia'
    if retcode == 0 and args.dump_syms and supports_build_id:
        if args.current_os == 'fuchsia':
            with open(dist + '.sym', 'w') as f:
                retcode = subprocess.call(
                    [args.dump_syms, '-r', '-o', 'Fuchsia', args.output_path],
                    stdout=f)

    if retcode == 0 and args.buildidtool and supports_build_id:
        if not args.build_id_dir:
            raise ValueError('Using --buildidtool requires --build-id-dir')
        retcode = subprocess.call(
            [
                args.buildidtool,
                '-build-id-dir',
                args.build_id_dir,
                '-stamp',
                dist + '.build-id.stamp',
                '-entry',
                '.debug=' + args.output_path,
                '-entry',
                '=' + dist,
            ])

    if retcode == 0:
        if args.depfile is not None:
            godepfile_args = [args.godepfile, '-o', dist]
            for f, t in link_to_source_list:
                godepfile_args += ['-prefixmap', '%s=%s' % (f, t)]
            if args.is_test:
                godepfile_args += ['-test']
            godepfile_args += [args.package]
            with open(args.depfile, 'wb') as into:
                subprocess.check_call(godepfile_args, env=env, stdout=into)

    return retcode
# Script entry point: exit with main()'s return code so build failures
# propagate to the caller (e.g. the build system driving this script).
if __name__ == '__main__':
    sys.exit(main())
| 36.863248 | 90 | 0.568359 |
import argparse
import os
import subprocess
import sys
import string
import shutil
import errno
from gen_library_metadata import get_sources
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--godepfile', help='Path to godepfile tool', required=True)
parser.add_argument(
'--root-out-dir', help='Path to root of build output', required=True)
parser.add_argument(
'--cc', help='The C compiler to use', required=False, default='cc')
parser.add_argument(
'--cxx', help='The C++ compiler to use', required=False, default='c++')
parser.add_argument(
'--dump-syms', help='The dump_syms tool to use', required=False)
parser.add_argument(
'--objcopy',
help='The objcopy tool to use',
required=False,
default='objcopy')
parser.add_argument('--sysroot', help='The sysroot to use', required=False)
parser.add_argument(
'--target', help='The compiler target to use', required=False)
parser.add_argument(
'--depfile', help='The path to the depfile', required=False)
parser.add_argument(
'--current-cpu',
help='Target architecture.',
choices=['x64', 'arm64'],
required=True)
parser.add_argument(
'--current-os',
help='Target operating system.',
choices=['fuchsia', 'linux', 'mac', 'win'],
required=True)
parser.add_argument('--buildidtool', help='The path to the buildidtool.')
parser.add_argument(
'--build-id-dir', help='The path to the .build-id directory.')
parser.add_argument(
'--go-root', help='The go root to use for builds.', required=True)
parser.add_argument(
'--go-cache', help='Cache directory to use for builds.', required=False)
parser.add_argument(
'--is-test', help='True if the target is a go test', default=False)
parser.add_argument('--buildmode', help='Build mode to use')
parser.add_argument(
'--gcflag',
help='Arguments to pass to Go compiler',
action='append',
default=[])
parser.add_argument(
'--ldflag',
help='Arguments to pass to Go linker',
action='append',
default=[])
parser.add_argument(
'--go-dep-files',
help='List of files describing library dependencies',
nargs='*',
default=[])
parser.add_argument(
'--root-build-dir',
help='Root build directory. Required if --go-dep-files is used.')
parser.add_argument('--binname', help='Output file', required=True)
parser.add_argument(
'--output-path',
help='Where to output the (unstripped) binary',
required=True)
parser.add_argument(
'--stripped-output-path',
help='Where to output a stripped binary, if supplied')
parser.add_argument(
'--verbose',
help='Tell the go tool to be verbose about what it is doing',
action='store_true')
parser.add_argument('--package', help='The package name', required=True)
parser.add_argument(
'--include-dir',
help='-isystem path to add',
action='append',
default=[])
parser.add_argument(
'--lib-dir', help='-L path to add', action='append', default=[])
parser.add_argument('--vet', help='Run go vet', action='store_true')
parser.add_argument(
'--tag', help='Add a go build tag', default=[], action='append')
parser.add_argument(
'--cgo', help='Whether to enable CGo', action='store_true')
args = parser.parse_args()
try:
os.makedirs(args.go_cache)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(args.go_cache):
pass
else:
raise
goarch = {
'x64': 'amd64',
'arm64': 'arm64',
}[args.current_cpu]
goos = {
'fuchsia': 'fuchsia',
'linux': 'linux',
'mac': 'darwin',
'win': 'windows',
}[args.current_os]
build_id_dir = os.path.join(args.root_out_dir, '.build-id')
dist = args.stripped_output_path or args.output_path
project_path = os.path.join(
args.root_out_dir, 'gen', 'gopaths', args.binname)
gopath_src = os.path.join(project_path, 'src')
if os.path.exists(gopath_src):
shutil.rmtree(gopath_src)
os.makedirs(gopath_src)
link_to_source_list = []
if args.go_dep_files:
assert args.root_build_dir, (
'--root-build-dir is required with --go-dep-files')
root_build_dir = os.path.abspath(args.root_build_dir)
link_to_source = {}
for dst, src in sorted(get_sources(args.go_dep_files).items()):
map_directly = False
if dst.endswith('/...'):
map_directly = True
assert dst.endswith('/...') >= src.endswith('/...'), (dst, src)
dst = dst[:-4]
if src.endswith('/...'):
src = src[:-4]
elif os.path.isfile(src):
map_directly = True
dst = dst.replace('/.../', '/')
dstdir = os.path.join(gopath_src, dst)
if map_directly:
parent = os.path.dirname(dstdir)
if not os.path.exists(parent):
os.makedirs(parent)
os.symlink(src, dstdir)
link_to_source[os.path.join(root_build_dir, dstdir)] = src
else:
os.makedirs(dstdir)
for filename in os.listdir(src):
src_file = os.path.join(src, filename)
if filename == 'testdata' or os.path.isfile(src_file):
os.symlink(src_file, os.path.join(dstdir, filename))
link_to_source[os.path.join(
root_build_dir, dstdir, filename)] = src
link_to_source_list = sorted(
link_to_source.items(), key=lambda x: x[0], reverse=True)
cflags = []
if args.sysroot:
cflags.extend(['--sysroot', args.sysroot])
if args.target:
cflags.extend(['-target', args.target])
ldflags = cflags[:]
if args.current_os == 'linux':
ldflags.extend(
[
'-stdlib=libc++',
])
for dir in args.include_dir:
cflags.extend(['-isystem', dir])
ldflags.extend(['-L' + dir for dir in args.lib_dir])
cflags_joined = ' '.join(cflags)
ldflags_joined = ' '.join(ldflags)
gopath = os.path.abspath(project_path)
build_goroot = os.path.abspath(args.go_root)
env = {
'PATH': os.path.join(build_goroot, 'bin') + ':/usr/bin:/bin',
'GO111MODULE': 'off',
'GOARCH': goarch,
'GOOS': goos,
'GOPATH': gopath,
# Some users have GOROOT set in their parent environment, which can break
# things, so it is always set explicitly here.
'GOROOT': build_goroot,
'GOCACHE': args.go_cache,
'CC': args.cc,
'CXX': args.cxx,
'CGO_CFLAGS': cflags_joined,
'CGO_CPPFLAGS': cflags_joined,
'CGO_CXXFLAGS': cflags_joined,
'CGO_LDFLAGS': ldflags_joined,
}
# Infra sets $TMPDIR which is cleaned between builds.
if os.getenv('TMPDIR'):
env['TMPDIR'] = os.getenv('TMPDIR')
if args.cgo:
env['CGO_ENABLED'] = '1'
if args.target:
env['CC_FOR_TARGET'] = env['CC']
env['CXX_FOR_TARGET'] = env['CXX']
go_tool = os.path.join(build_goroot, 'bin', 'go')
if args.vet:
retcode = subprocess.call([go_tool, 'vet', args.package], env=env)
if retcode != 0:
return retcode
cmd = [go_tool]
if args.is_test:
cmd += ['test', '-c']
else:
cmd += ['build', '-trimpath']
if args.verbose:
cmd += ['-x']
if args.tag:
# Separate tags by spaces. This behavior is actually deprecated in the
# go command line, but Fuchsia currently has an older version of go
# that hasn't switched to commas.
cmd += ['-tags', ' '.join(args.tag)]
if args.buildmode:
cmd += ['-buildmode', args.buildmode]
if args.gcflag:
cmd += ['-gcflags', ' '.join(args.gcflag)]
if args.ldflag:
cmd += ['-ldflags=' + ' '.join(args.ldflag)]
cmd += [
'-pkgdir',
os.path.join(project_path, 'pkg'),
'-o',
args.output_path,
args.package,
]
retcode = subprocess.call(cmd, env=env)
if retcode == 0 and args.stripped_output_path:
if args.current_os == 'mac':
retcode = subprocess.call(
[
'xcrun', 'strip', '-x', args.output_path, '-o',
args.stripped_output_path
],
env=env)
else:
retcode = subprocess.call(
[
args.objcopy, '--strip-sections', args.output_path,
args.stripped_output_path
],
env=env)
supports_build_id = args.current_os == 'fuchsia'
if retcode == 0 and args.dump_syms and supports_build_id:
if args.current_os == 'fuchsia':
with open(dist + '.sym', 'w') as f:
retcode = subprocess.call(
[args.dump_syms, '-r', '-o', 'Fuchsia', args.output_path],
stdout=f)
if retcode == 0 and args.buildidtool and supports_build_id:
if not args.build_id_dir:
raise ValueError('Using --buildidtool requires --build-id-dir')
retcode = subprocess.call(
[
args.buildidtool,
'-build-id-dir',
args.build_id_dir,
'-stamp',
dist + '.build-id.stamp',
'-entry',
'.debug=' + args.output_path,
'-entry',
'=' + dist,
])
if retcode == 0:
if args.depfile is not None:
godepfile_args = [args.godepfile, '-o', dist]
for f, t in link_to_source_list:
godepfile_args += ['-prefixmap', '%s=%s' % (f, t)]
if args.is_test:
godepfile_args += ['-test']
godepfile_args += [args.package]
with open(args.depfile, 'wb') as into:
subprocess.check_call(godepfile_args, env=env, stdout=into)
return retcode
if __name__ == '__main__':
sys.exit(main())
| true | true |
f73389366327068d6dbba416f0132cddf5ec3000 | 1,207 | py | Python | dev_tools/test_pattern.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | dev_tools/test_pattern.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | dev_tools/test_pattern.py | SocialSisterYi/Alconna | 3e1d986ca5486dfd3c7bd80118a75364ab6831b8 | [
"MIT"
] | null | null | null | from arclet.alconna.types import ObjectPattern, add_check, ArgPattern, PatternToken
from arclet.alconna import AlconnaFire
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Plain, Image, At, MusicShare
from graia.ariadne.app import Ariadne, MiraiSession
# NOTE(review): hard-coded localhost mirai-api-http credentials -- local test
# scaffolding only; do not reuse outside experiments.
bot = Ariadne(connect_info=MiraiSession(host="http://localhost:8080", verify_key="1234567890abcdef", account=123456789))

# Register argument patterns so Alconna can coerce raw command text into
# Ariadne message elements; each ObjectPattern is limited to the listed
# constructor field(s), and the "ariadne" pattern always resolves to `bot`.
add_check(ArgPattern("ariadne", PatternToken.REGEX_TRANSFORM, Ariadne, lambda x: bot, 'app'))
ObjectPattern(Plain, limit=("text",))
ObjectPattern(Image, limit=("url",))
ObjectPattern(At, limit=("target",))
ObjectPattern(MusicShare, flag="json")
async def test(app: Ariadne, text: Plain, img: Image, at: At, music: MusicShare):
print(locals())
msg = MessageChain.create([at, text, img])
print(repr(msg))
print(await app.sendGroupMessage(at.target, msg))
alc = AlconnaFire(test)
alc.parse("test ariadne 'hello world!' https://www.baidu.com/img/bd_logo1.png 123456 \"{'kind':'QQMusic','title':'音乐标题','summary':'音乐摘要','jumpUrl':'http://www.baidu.com','pictureUrl':'http://www.baidu.com/img/bd_logo1.png','musicUrl':'http://www.baidu.com/audio/bd.mp3','brief':'简介'}\"")
| 48.28 | 287 | 0.74565 | from arclet.alconna.types import ObjectPattern, add_check, ArgPattern, PatternToken
from arclet.alconna import AlconnaFire
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Plain, Image, At, MusicShare
from graia.ariadne.app import Ariadne, MiraiSession
bot = Ariadne(connect_info=MiraiSession(host="http://localhost:8080", verify_key="1234567890abcdef", account=123456789))
add_check(ArgPattern("ariadne", PatternToken.REGEX_TRANSFORM, Ariadne, lambda x: bot, 'app'))
ObjectPattern(Plain, limit=("text",))
ObjectPattern(Image, limit=("url",))
ObjectPattern(At, limit=("target",))
ObjectPattern(MusicShare, flag="json")
async def test(app: Ariadne, text: Plain, img: Image, at: At, music: MusicShare):
print(locals())
msg = MessageChain.create([at, text, img])
print(repr(msg))
print(await app.sendGroupMessage(at.target, msg))
alc = AlconnaFire(test)
alc.parse("test ariadne 'hello world!' https://www.baidu.com/img/bd_logo1.png 123456 \"{'kind':'QQMusic','title':'音乐标题','summary':'音乐摘要','jumpUrl':'http://www.baidu.com','pictureUrl':'http://www.baidu.com/img/bd_logo1.png','musicUrl':'http://www.baidu.com/audio/bd.mp3','brief':'简介'}\"")
| true | true |
f73389df235a94a0c337a0c36489840ae2883f92 | 252 | py | Python | tests/unit/test_algorithms_dsatuto.py | gauthier-emse/pyDcop | a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed | [
"BSD-3-Clause"
] | 28 | 2018-05-18T10:25:58.000Z | 2022-03-05T16:24:15.000Z | tests/unit/test_algorithms_dsatuto.py | gauthier-emse/pyDcop | a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed | [
"BSD-3-Clause"
] | 19 | 2018-09-21T21:50:15.000Z | 2022-02-22T20:23:32.000Z | tests/unit/test_algorithms_dsatuto.py | gauthier-emse/pyDcop | a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed | [
"BSD-3-Clause"
] | 17 | 2018-05-29T19:54:07.000Z | 2022-02-22T20:14:46.000Z | from importlib import import_module
from pydcop.algorithms import AlgorithmDef, ComputationDef, load_algorithm_module
from pydcop.computations_graph.constraints_hypergraph import \
VariableComputationNode
from pydcop.dcop.objects import Variable
| 31.5 | 81 | 0.869048 | from importlib import import_module
from pydcop.algorithms import AlgorithmDef, ComputationDef, load_algorithm_module
from pydcop.computations_graph.constraints_hypergraph import \
VariableComputationNode
from pydcop.dcop.objects import Variable
| true | true |
f7338a76697ba4e87c7780dbe95880201fde6819 | 75 | py | Python | vertigo/datasets/__init__.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | vertigo/datasets/__init__.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | vertigo/datasets/__init__.py | rmarkello/vertigo | 35c79faf3a62b9b3941f0c989640c2f5de8f819e | [
"Apache-2.0"
] | null | null | null | __all__ = [
'fetch_fsaverage'
]
from .fetchers import fetch_fsaverage
| 12.5 | 37 | 0.733333 | __all__ = [
'fetch_fsaverage'
]
from .fetchers import fetch_fsaverage
| true | true |
f7338ac94d4fb8af8870a4afc13f0019a80d21c6 | 2,417 | py | Python | setup.py | Alymostafa/torch-cam | 3f30f0db90fba1b921dbe71e979001c954d245da | [
"MIT"
] | 1 | 2020-11-17T18:20:56.000Z | 2020-11-17T18:20:56.000Z | setup.py | Alymostafa/torch-cam | 3f30f0db90fba1b921dbe71e979001c954d245da | [
"MIT"
] | null | null | null | setup.py | Alymostafa/torch-cam | 3f30f0db90fba1b921dbe71e979001c954d245da | [
"MIT"
] | 1 | 2021-01-04T20:28:20.000Z | 2021-01-04T20:28:20.000Z | #!usr/bin/python
# -*- coding: utf-8 -*-
"""
Package installation setup
"""
import os
import subprocess
from setuptools import find_packages, setup
version = '0.1.2a0'
sha = 'Unknown'
package_name = 'torchcam'
cwd = os.path.dirname(os.path.abspath(__file__))
try:
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
version += '+' + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
def write_version_file():
    """Write the resolved build version into ``torchcam/version.py``.

    Uses the module-level ``cwd`` and ``version`` computed above.
    """
    version_path = os.path.join(cwd, 'torchcam', 'version.py')
    with open(version_path, 'w') as f:
        f.write("__version__ = '{}'\n".format(version))
write_version_file()
with open('README.md') as f:
readme = f.read()
requirements = [
'torch>=1.1.0',
'numpy>=1.14.0',
'pillow>=5.0.0',
'matplotlib>=3.0.0'
]
setup(
# Metadata
name=package_name,
version=version,
author='François-Guillaume Fernandez',
description='Class activation maps for your PyTorch CNN models',
long_description=readme,
long_description_content_type="text/markdown",
url='https://github.com/frgfm/torch-cam',
download_url='https://github.com/frgfm/torch-cam/tags',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['pytorch', 'deep learning', 'cnn', 'convolution', 'activation', 'gradcam'],
# Package info
packages=find_packages(exclude=('test',)),
zip_safe=True,
python_requires='>=3.6.0',
include_package_data=True,
install_requires=requirements,
package_data={'': ['LICENSE']}
)
| 27.781609 | 96 | 0.639222 |
import os
import subprocess
from setuptools import find_packages, setup
version = '0.1.2a0'
sha = 'Unknown'
package_name = 'torchcam'
cwd = os.path.dirname(os.path.abspath(__file__))
try:
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
version += '+' + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
def write_version_file():
version_path = os.path.join(cwd, 'torchcam', 'version.py')
with open(version_path, 'w') as f:
f.write("__version__ = '{}'\n".format(version))
write_version_file()
with open('README.md') as f:
readme = f.read()
requirements = [
'torch>=1.1.0',
'numpy>=1.14.0',
'pillow>=5.0.0',
'matplotlib>=3.0.0'
]
setup(
name=package_name,
version=version,
author='François-Guillaume Fernandez',
description='Class activation maps for your PyTorch CNN models',
long_description=readme,
long_description_content_type="text/markdown",
url='https://github.com/frgfm/torch-cam',
download_url='https://github.com/frgfm/torch-cam/tags',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['pytorch', 'deep learning', 'cnn', 'convolution', 'activation', 'gradcam'],
packages=find_packages(exclude=('test',)),
zip_safe=True,
python_requires='>=3.6.0',
include_package_data=True,
install_requires=requirements,
package_data={'': ['LICENSE']}
)
| true | true |
f7338b27cafceb5086af9c433c59de1f6156099b | 81,197 | py | Python | sphinx/writers/latex.py | hkuno/sphinx | d62220676d38f8d588fb59f92c3169385e94ad00 | [
"BSD-2-Clause"
] | 2 | 2015-02-05T13:09:34.000Z | 2015-06-24T19:39:03.000Z | sphinx/writers/latex.py | jfbu/sphinx | d62220676d38f8d588fb59f92c3169385e94ad00 | [
"BSD-2-Clause"
] | 1 | 2016-06-14T07:25:48.000Z | 2016-06-14T07:25:48.000Z | sphinx/writers/latex.py | jfbu/sphinx | d62220676d38f8d588fb59f92c3169385e94ad00 | [
"BSD-2-Clause"
] | 1 | 2020-07-14T15:46:16.000Z | 2020-07-14T15:46:16.000Z | """
sphinx.writers.latex
~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for LaTeX.
Much of this code is adapted from Dave Kuhlman's "docpy" writer from his
docutils sandbox.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import warnings
from collections import defaultdict
from os import path
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Set, Tuple, cast
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import addnodes, highlighting
from sphinx.deprecation import RemovedInSphinx70Warning
from sphinx.domains import IndexEntry
from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging, split_into, texescape
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import clean_astext, get_prev_node
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_replace_map
try:
from docutils.utils.roman import toRoman
except ImportError:
# In Debian/Ubuntu, roman package is provided as roman, not as docutils.utils.roman
from roman import toRoman # type: ignore
if TYPE_CHECKING:
from sphinx.builders.latex import LaTeXBuilder
from sphinx.builders.latex.theming import Theme
logger = logging.getLogger(__name__)
# threshold for citation label length (presumably consumed by citation
# rendering further down in this module -- TODO confirm)
MAX_CITATION_LABEL_LENGTH = 8

# LaTeX sectioning command names, from coarsest to finest
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
                     "subsubsection", "paragraph", "subparagraph"]

# maps reST enumerated-list types to LaTeX counter formatting commands;
# anything unknown falls back to arabic numbering
ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
                                   {
                                       'arabic': r'\arabic',
                                       'loweralpha': r'\alph',
                                       'upperalpha': r'\Alph',
                                       'lowerroman': r'\roman',
                                       'upperroman': r'\Roman',
                                   })

# line-ending shorthands used when assembling LaTeX output
CR = '\n'
BLANKLINE = '\n\n'

# splits "text (extra)" into the text and the parenthesized "extra" part
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
class collected_footnote(nodes.footnote):
    """Footnotes that are collected are assigned this class.

    Marker subclass only: it lets the translator tell already-collected
    footnotes apart from ordinary ``nodes.footnote`` instances.
    """
class UnsupportedError(SphinxError):
    """Raised for reST markup that cannot be represented in LaTeX output."""
    category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
    """docutils Writer producing LaTeX via a :class:`LaTeXTranslator`."""

    supported = ('sphinxlatex',)

    settings_spec = ('LaTeX writer options', '', (
        ('Document name', ['--docname'], {'default': ''}),
        ('Document class', ['--docclass'], {'default': 'manual'}),
        ('Author', ['--author'], {'default': ''}),
    ))
    settings_defaults: Dict = {}

    # final LaTeX source; filled in by translate()
    output = None

    def __init__(self, builder: "LaTeXBuilder") -> None:
        super().__init__()
        self.builder = builder
        # NOTE(review): stays None until assigned externally before
        # translate() is called -- confirm against the builder code
        self.theme: Theme = None

    def translate(self) -> None:
        # walk the doctree with a translator and collect its rendered output
        visitor = self.builder.create_translator(self.document, self.builder, self.theme)
        self.document.walkabout(visitor)
        self.output = cast(LaTeXTranslator, visitor).astext()
# Helper classes
class Table:
    """State of one reST table while it is being converted to LaTeX."""

    def __init__(self, node: Element) -> None:
        self.header: List[str] = []
        self.body: List[str] = []
        self.align = node.get('align', 'default')
        self.classes: List[str] = node.get('classes', [])
        self.colcount = 0
        self.colspec: str = None
        self.colwidths: List[int] = []
        self.has_problematic = False
        self.has_oldproblematic = False
        self.has_verbatim = False
        self.caption: List[str] = None
        self.stubs: List[int] = []

        # cursor: position of the cell currently being filled
        self.row = 0
        self.col = 0

        # maps (row, col) -> cell_id; all positions covered by one
        # (possibly spanning) cell share the same id, 0 means "empty"
        self.cells: Dict[Tuple[int, int], int] = defaultdict(int)
        self.cell_id = 0  # id assigned to the most recently added cell

    def is_longtable(self) -> bool:
        """True if and only if the table uses the longtable environment."""
        return 'longtable' in self.classes or self.row > 30

    def get_table_type(self) -> str:
        """Return the LaTeX environment name for this table.

        One of ``longtable``, ``tabular`` or ``tabulary``.
        """
        if self.is_longtable():
            return 'longtable'
        if self.has_verbatim:
            return 'tabular'
        if self.colspec:
            return 'tabulary'
        if self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
            return 'tabular'
        return 'tabulary'

    def get_colspec(self) -> str:
        """Return the column spec of the table.

        This is what LaTeX calls the 'preamble argument' of the used table
        environment.

        .. note:: the ``\\X`` and ``T`` column type specifiers are defined
           in ``sphinx.sty``.
        """
        if self.colspec:
            return self.colspec
        if self.colwidths and 'colwidths-given' in self.classes:
            total = sum(self.colwidths)
            colspecs = [r'\X{%d}{%d}' % (width, total) for width in self.colwidths]
            return '{|%s|}' % '|'.join(colspecs) + CR
        if self.has_problematic:
            return r'{|*{%d}{\X{1}{%d}|}}' % (self.colcount, self.colcount) + CR
        if self.get_table_type() == 'tabulary':
            # sphinx.sty sets T to be J by default.
            return '{|' + ('T|' * self.colcount) + '}' + CR
        if self.has_oldproblematic:
            return r'{|*{%d}{\X{1}{%d}|}}' % (self.colcount, self.colcount) + CR
        return '{|' + ('l|' * self.colcount) + '}' + CR

    def add_cell(self, height: int, width: int) -> None:
        """Register a ``height`` x ``width`` cell at the current position."""
        self.cell_id += 1
        for r in range(self.row, self.row + height):
            for c in range(self.col, self.col + width):
                assert self.cells[(r, c)] == 0
                self.cells[(r, c)] = self.cell_id

    def cell(self, row: int = None, col: int = None) -> "TableCell":
        """Return the cell (rectangular area) covering the given position.

        Without arguments the current position (``self.row``, ``self.col``)
        is used.  Returns None when the position lies in no cell.
        """
        if row is None:
            row = self.row
        if col is None:
            col = self.col
        try:
            return TableCell(self, row, col)
        except IndexError:
            return None
class TableCell:
    """One rectangular (possibly row/column spanning) cell of a Table."""

    def __init__(self, table: Table, row: int, col: int) -> None:
        if table.cells[(row, col)] == 0:
            raise IndexError

        self.table = table
        self.cell_id = table.cells[(row, col)]
        self.row = row
        self.col = col

        # normalize the position to the top-left corner of the cell,
        # in case (row, col) points into the middle of a spanning cell
        while table.cells[(self.row - 1, self.col)] == self.cell_id:
            self.row -= 1
        while table.cells[(self.row, self.col - 1)] == self.cell_id:
            self.col -= 1

    @property
    def width(self) -> int:
        """Number of columns this cell spans."""
        span = 0
        while self.table.cells[(self.row, self.col + span)] == self.cell_id:
            span += 1
        return span

    @property
    def height(self) -> int:
        """Number of rows this cell spans."""
        span = 0
        while self.table.cells[(self.row + span, self.col)] == self.cell_id:
            span += 1
        return span
def escape_abbr(text: str) -> str:
    """Adjust spacing after abbreviations: append ``\\@`` to every period
    that is followed by whitespace or ends the string."""
    sentence_period = re.compile(r'\.(?=\s|$)')
    return sentence_period.sub(r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int = 100) -> str:
    """Convert the reST length *width_str* into a LaTeX length.

    *scale* is a percentage applied to the amount.  Raises ValueError when
    *width_str* is not a valid reST length.
    """
    matched = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
    if matched is None:
        raise ValueError
    amount, unit = matched.groups()[:2]

    if scale == 100:
        float(amount)  # validate that the amount is numeric
        if unit in ('', "px"):
            return r"%s\sphinxpxdimen" % amount
        if unit == 'pt':
            return '%sbp' % amount  # convert to 'bp'
        if unit == "%":
            return r"%.3f\linewidth" % (float(amount) / 100.0)
        # any other unit is passed through to LaTeX unchanged
        return width_str

    amount_float = float(amount) * scale / 100.0
    if unit in ('', "px"):
        return r"%.5f\sphinxpxdimen" % amount_float
    if unit == 'pt':
        return '%.5fbp' % amount_float
    if unit == "%":
        return r"%.5f\linewidth" % (amount_float / 100.0)
    return "%.5f%s" % (amount_float, unit)
class LaTeXTranslator(SphinxTranslator):
    # active builder; assigned when the translator is created
    builder: "LaTeXBuilder" = None

    secnumdepth = 2  # legacy sphinxhowto.cls uses this, whereas article.cls
    # default is originally 3. For book/report, 2 is already LaTeX default.
    # NOTE(review): presumably toggled by builders that tolerate missing
    # images (consumed by image handling further down) -- confirm
    ignore_missing_images = False
    def __init__(self, document: nodes.document, builder: "LaTeXBuilder",
                 theme: "Theme") -> None:
        """Set up translator state and derive the LaTeX template variables
        (``self.elements``) from the builder context, theme and config."""
        super().__init__(document, builder)
        self.body: List[str] = []
        self.theme = theme

        # flags
        self.in_title = 0
        self.in_production_list = 0
        self.in_footnote = 0
        self.in_caption = 0
        self.in_term = 0
        self.needs_linetrimming = 0
        self.in_minipage = 0
        self.no_latex_floats = 0
        self.first_document = 1
        self.this_is_the_title = 1
        self.literal_whitespace = 0
        self.in_parsed_literal = 0
        self.compact_list = 0
        self.first_param = 0

        # options collected for the sphinx LaTeX package
        sphinxpkgoptions = []

        # sort out some elements
        self.elements = self.builder.context.copy()

        # initial section names
        self.sectionnames = LATEXSECTIONNAMES[:]
        if self.theme.toplevel_sectioning == 'section':
            self.sectionnames.remove('chapter')

        # determine top section level
        self.top_sectionlevel = 1
        if self.config.latex_toplevel_sectioning:
            try:
                self.top_sectionlevel = \
                    self.sectionnames.index(self.config.latex_toplevel_sectioning)
            except ValueError:
                logger.warning(__('unknown %r toplevel_sectioning for class %r') %
                               (self.config.latex_toplevel_sectioning, self.theme.docclass))

        if self.config.numfig:
            self.numfig_secnum_depth = self.config.numfig_secnum_depth
            if self.numfig_secnum_depth > 0:  # default is 1
                # numfig_secnum_depth as passed to sphinx.sty indices same names as in
                # LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
                if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
                   self.top_sectionlevel > 0:
                    self.numfig_secnum_depth += self.top_sectionlevel
                else:
                    self.numfig_secnum_depth += self.top_sectionlevel - 1
                # this (minus one) will serve as minimum to LaTeX's secnumdepth
                self.numfig_secnum_depth = min(self.numfig_secnum_depth,
                                               len(LATEXSECTIONNAMES) - 1)
                # if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
                sphinxpkgoptions.append('numfigreset=%s' % self.numfig_secnum_depth)
            else:
                sphinxpkgoptions.append('nonumfigreset')

        if self.config.numfig and self.config.math_numfig:
            sphinxpkgoptions.append('mathnumfig')

        if (self.config.language not in {None, 'en', 'ja'} and
                'fncychap' not in self.config.latex_elements):
            # use Sonny style if any language specified (except English)
            self.elements['fncychap'] = (r'\usepackage[Sonny]{fncychap}' + CR +
                                         r'\ChNameVar{\Large\normalfont\sffamily}' + CR +
                                         r'\ChTitleVar{\Large\normalfont\sffamily}')

        self.babel = self.builder.babel
        if self.config.language and not self.babel.is_supported_language():
            # emit warning if specified language is invalid
            # (only emitting, nothing changed to processing)
            logger.warning(__('no Babel option known for language %r'),
                           self.config.language)

        minsecnumdepth = self.secnumdepth  # 2 from legacy sphinx manual/howto
        if self.document.get('tocdepth'):
            # reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
            # tocdepth = -1: show only parts
            # tocdepth = 0: show parts and chapters
            # tocdepth = 1: show parts, chapters and sections
            # tocdepth = 2: show parts, chapters, sections and subsections
            # ...
            tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
            if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
               self.top_sectionlevel > 0:
                tocdepth += 1  # because top_sectionlevel is shifted by -1
            if tocdepth > len(LATEXSECTIONNAMES) - 2:  # default is 5 <-> subparagraph
                logger.warning(__('too large :maxdepth:, ignored.'))
                tocdepth = len(LATEXSECTIONNAMES) - 2

            self.elements['tocdepth'] = r'\setcounter{tocdepth}{%d}' % tocdepth
            minsecnumdepth = max(minsecnumdepth, tocdepth)

        if self.config.numfig and (self.config.numfig_secnum_depth > 0):
            minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)

        if minsecnumdepth > self.secnumdepth:
            self.elements['secnumdepth'] = r'\setcounter{secnumdepth}{%d}' %\
                                           minsecnumdepth

        contentsname = document.get('contentsname')
        if contentsname:
            self.elements['contentsname'] = self.babel_renewcommand(r'\contentsname',
                                                                    contentsname)

        if self.elements['maxlistdepth']:
            sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
        if sphinxpkgoptions:
            self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
        if self.elements['sphinxsetup']:
            self.elements['sphinxsetup'] = (r'\sphinxsetup{%s}' % self.elements['sphinxsetup'])
        if self.elements['extraclassoptions']:
            self.elements['classoptions'] += ',' + \
                self.elements['extraclassoptions']

        self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
                                                       latex_engine=self.config.latex_engine)

        # per-document rendering state
        self.context: List[Any] = []
        self.descstack: List[str] = []
        self.tables: List[Table] = []
        self.next_table_colspec: str = None
        self.bodystack: List[List[str]] = []
        self.footnote_restricted: Element = None
        self.pending_footnotes: List[nodes.footnote_reference] = []
        self.curfilestack: List[str] = []
        self.handled_abbrs: Set[str] = set()
    def pushbody(self, newbody: List[str]) -> None:
        # Redirect output: writes go into *newbody* until popbody().
        self.bodystack.append(self.body)
        self.body = newbody

    def popbody(self) -> List[str]:
        # Restore the previous output target; return the collected fragment.
        body = self.body
        self.body = self.bodystack.pop()
        return body

    def astext(self) -> str:
        # Assemble the final LaTeX document from the collected body and the
        # template variables prepared in __init__.
        self.elements.update({
            'body': ''.join(self.body),
            'indices': self.generate_indices()
        })
        return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (r'\phantomsection' if anchor else '') + r'\label{%s}' % self.idescape(id)
    def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
        # Concatenated \label's for all ids of *node* (optionally anchored).
        labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
        if anchor:
            return r'\phantomsection' + labels
        else:
            return labels

    def hyperlink(self, id: str) -> str:
        # opening part of a \hyperref; the caller appends link text and '}}'
        return r'{\hyperref[%s]{' % self.idescape(id)

    def hyperpageref(self, id: str) -> str:
        return r'\autopageref*{%s}' % self.idescape(id)

    def escape(self, s: str) -> str:
        # escape *s* for literal insertion into LaTeX output
        return texescape.escape(s, self.config.latex_engine)

    def idescape(self, id: str) -> str:
        # make an arbitrary id safe inside \label/\hyperref arguments
        return r'\detokenize{%s}' % str(id).translate(tex_replace_map).\
            encode('ascii', 'backslashreplace').decode('ascii').\
            replace('\\', '_')

    def babel_renewcommand(self, command: str, definition: str) -> str:
        # redefine *command*, scoped to the active babel language if any
        if self.elements['multilingual']:
            prefix = r'\addto\captions%s{' % self.babel.get_language()
            suffix = '}'
        else:  # babel is disabled (mainly for Japanese environment)
            prefix = ''
            suffix = ''

        return r'%s\renewcommand{%s}{%s}%s' % (prefix, command, definition, suffix) + CR
    def generate_indices(self) -> str:
        """Render the domain-specific indices (e.g. the Python module index)
        as ``sphinxtheindex`` environments and return them as one string."""
        def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> None:
            # emit one sphinxtheindex environment for *content*, a list of
            # (letter, entries) groups; output accumulates in *ret*
            ret.append(r'\begin{sphinxtheindex}' + CR)
            ret.append(r'\let\bigletter\sphinxstyleindexlettergroup' + CR)
            for i, (letter, entries) in enumerate(content):
                if i > 0:
                    ret.append(r'\indexspace' + CR)
                ret.append(r'\bigletter{%s}' % self.escape(letter) + CR)
                for entry in entries:
                    if not entry[3]:
                        continue
                    ret.append(r'\item\relax\sphinxstyleindexentry{%s}' %
                               self.encode(entry[0]))
                    if entry[4]:
                        # add "extra" info
                        ret.append(r'\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
                    ret.append(r'\sphinxstyleindexpageref{%s:%s}' %
                               (entry[2], self.idescape(entry[3])) + CR)
            ret.append(r'\end{sphinxtheindex}' + CR)

        ret = []
        # latex_domain_indices can be False/True or a list of index names
        indices_config = self.config.latex_domain_indices
        if indices_config:
            for domain in self.builder.env.domains.values():
                for indexcls in domain.indices:
                    indexname = '%s-%s' % (domain.name, indexcls.name)
                    if isinstance(indices_config, list):
                        if indexname not in indices_config:
                            continue
                    content, collapsed = indexcls(domain).generate(
                        self.builder.docnames)
                    if not content:
                        continue
                    ret.append(r'\renewcommand{\indexname}{%s}' % indexcls.localname + CR)
                    generate(content, collapsed)

        return ''.join(ret)
def render(self, template_name: str, variables: Dict) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table:
"""Get current table."""
if self.tables:
return self.tables[-1]
else:
return None
    def visit_document(self, node: Element) -> None:
        self.curfilestack.append(node.get('docname', ''))
        if self.first_document == 1:
            # the first document is all the regular content ...
            self.first_document = 0
        elif self.first_document == 0:
            # ... and all others are the appendices
            self.body.append(CR + r'\appendix' + CR)
            self.first_document = -1
        if 'docname' in node:
            self.body.append(self.hypertarget(':doc'))
        # "- 1" because the level is increased before the title is visited
        self.sectionlevel = self.top_sectionlevel - 1

    def depart_document(self, node: Element) -> None:
        pass

    def visit_start_of_file(self, node: Element) -> None:
        # track which source file the following content comes from
        self.curfilestack.append(node['docname'])

    def depart_start_of_file(self, node: Element) -> None:
        self.curfilestack.pop()

    def visit_section(self, node: Element) -> None:
        if not self.this_is_the_title:
            self.sectionlevel += 1
        self.body.append(BLANKLINE)

    def depart_section(self, node: Element) -> None:
        # never drop below the configured top-level sectioning depth
        self.sectionlevel = max(self.sectionlevel - 1,
                                self.top_sectionlevel - 1)

    def visit_problematic(self, node: Element) -> None:
        # problematic nodes (e.g. broken references) are shown in bold red
        self.body.append(r'{\color{red}\bfseries{}')

    def depart_problematic(self, node: Element) -> None:
        self.body.append('}')
    def visit_topic(self, node: Element) -> None:
        self.in_minipage = 1
        self.body.append(CR + r'\begin{sphinxShadowBox}' + CR)

    def depart_topic(self, node: Element) -> None:
        self.in_minipage = 0
        self.body.append(r'\end{sphinxShadowBox}' + CR)

    # sidebars are rendered exactly like topics
    visit_sidebar = visit_topic
    depart_sidebar = depart_topic

    def visit_glossary(self, node: Element) -> None:
        pass

    def depart_glossary(self, node: Element) -> None:
        pass

    def visit_productionlist(self, node: Element) -> None:
        self.body.append(BLANKLINE)
        self.body.append(r'\begin{productionlist}' + CR)
        self.in_production_list = 1

    def depart_productionlist(self, node: Element) -> None:
        self.body.append(r'\end{productionlist}' + BLANKLINE)
        self.in_production_list = 0

    def visit_production(self, node: Element) -> None:
        if node['tokenname']:
            tn = node['tokenname']
            # make the token a hyperlink target for :token: references
            self.body.append(self.hypertarget('grammar-token-' + tn))
            self.body.append(r'\production{%s}{' % self.encode(tn))
        else:
            # continuation line of the previous production rule
            self.body.append(r'\productioncont{')

    def depart_production(self, node: Element) -> None:
        self.body.append('}' + CR)

    def visit_transition(self, node: Element) -> None:
        self.body.append(self.elements['transition'])

    def depart_transition(self, node: Element) -> None:
        pass
def visit_title(self, node: Element) -> None:
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
elif isinstance(parent, nodes.section):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
logger.warning(__('document title is not a single Text node'),
location=node)
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = self.escape(node.astext())
self.this_is_the_title = 0
raise nodes.SkipNode
else:
short = ''
if list(node.traverse(nodes.image)):
short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(r'\%s%s{' % (self.sectionnames[-1], short))
self.context.append('}' + CR + self.hypertarget_to(node.parent))
elif isinstance(parent, nodes.topic):
self.body.append(r'\sphinxstyletopictitle{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebartitle{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}' + CR)
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
else:
logger.warning(__('encountered title node not in section, topic, table, '
'admonition or sidebar'),
location=node)
self.body.append(r'\sphinxstyleothertitle{')
self.context.append('}' + CR)
self.in_title = 1
    def depart_title(self, node: Element) -> None:
        self.in_title = 0
        if isinstance(node.parent, nodes.table):
            # the redirected title output becomes the table caption
            self.table.caption = self.popbody()
        else:
            self.body.append(self.context.pop())

    def visit_subtitle(self, node: Element) -> None:
        if isinstance(node.parent, nodes.sidebar):
            self.body.append(r'\sphinxstylesidebarsubtitle{')
            self.context.append('}' + CR)
        else:
            # subtitles elsewhere are passed through without markup
            self.context.append('')

    def depart_subtitle(self, node: Element) -> None:
        self.body.append(self.context.pop())
#############################################################
# Domain-specific object descriptions
#############################################################
# Top-level nodes for descriptions
##################################
    def visit_desc(self, node: Element) -> None:
        if self.config.latex_show_urls == 'footnote':
            # savenotes lets URL footnotes escape the fulllineitems environment
            self.body.append(BLANKLINE)
            self.body.append(r'\begin{savenotes}\begin{fulllineitems}' + CR)
        else:
            self.body.append(BLANKLINE)
            self.body.append(r'\begin{fulllineitems}' + CR)
        if self.table:
            self.table.has_problematic = True

    def depart_desc(self, node: Element) -> None:
        if self.config.latex_show_urls == 'footnote':
            self.body.append(CR + r'\end{fulllineitems}\end{savenotes}' + BLANKLINE)
        else:
            self.body.append(CR + r'\end{fulllineitems}' + BLANKLINE)

    def _visit_signature_line(self, node: Element) -> None:
        # pick the signature macro depending on whether a parameter list follows
        for child in node:
            if isinstance(child, addnodes.desc_parameterlist):
                self.body.append(r'\pysiglinewithargsret{')
                break
        else:
            self.body.append(r'\pysigline{')

    def _depart_signature_line(self, node: Element) -> None:
        self.body.append('}')

    def visit_desc_signature(self, node: Element) -> None:
        if node.parent['objtype'] != 'describe' and node['ids']:
            hyper = self.hypertarget(node['ids'][0])
        else:
            hyper = ''
        self.body.append(hyper)
        if not node.get('is_multiline'):
            self._visit_signature_line(node)
        else:
            self.body.append('%' + CR)
            self.body.append(r'\pysigstartmultiline' + CR)

    def depart_desc_signature(self, node: Element) -> None:
        if not node.get('is_multiline'):
            self._depart_signature_line(node)
        else:
            self.body.append('%' + CR)
            self.body.append(r'\pysigstopmultiline')

    def visit_desc_signature_line(self, node: Element) -> None:
        self._visit_signature_line(node)

    def depart_desc_signature_line(self, node: Element) -> None:
        self._depart_signature_line(node)

    def visit_desc_content(self, node: Element) -> None:
        pass

    def depart_desc_content(self, node: Element) -> None:
        pass

    def visit_desc_inline(self, node: Element) -> None:
        self.body.append(r'\sphinxcode{\sphinxupquote{')

    def depart_desc_inline(self, node: Element) -> None:
        self.body.append('}}')
# Nodes for high-level structure in signatures
##############################################
def visit_desc_name(self, node: Element) -> None:
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_name(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_addname(self, node: Element) -> None:
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node: Element) -> None:
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node: Element) -> None:
pass
def depart_desc_type(self, node: Element) -> None:
pass
def visit_desc_returns(self, node: Element) -> None:
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node: Element) -> None:
self.body.append(r'}')
    def visit_desc_parameterlist(self, node: Element) -> None:
        """Start the parameter list; parameters are comma-joined by visit_desc_parameter."""
        # close name, open parameterlist
        self.body.append('}{')
        self.first_param = 1
    def depart_desc_parameterlist(self, node: Element) -> None:
        # close parameterlist, open return annotation
        self.body.append('}{')
    def visit_desc_parameter(self, node: Element) -> None:
        """Emit one parameter, preceded by ", " except for the first."""
        if not self.first_param:
            self.body.append(', ')
        else:
            self.first_param = 0
        if not node.hasattr('noemph'):
            self.body.append(r'\emph{')
    def depart_desc_parameter(self, node: Element) -> None:
        if not node.hasattr('noemph'):
            self.body.append('}')
    def visit_desc_optional(self, node: Element) -> None:
        """Wrap optional parameters in \\sphinxoptional (renders brackets)."""
        self.body.append(r'\sphinxoptional{')
    def depart_desc_optional(self, node: Element) -> None:
        self.body.append('}')
    def visit_desc_annotation(self, node: Element) -> None:
        """Open bold code markup for annotations such as "class" or "property"."""
        self.body.append(r'\sphinxbfcode{\sphinxupquote{')
    def depart_desc_annotation(self, node: Element) -> None:
        self.body.append('}}')
    ##############################################
def visit_seealso(self, node: Element) -> None:
self.body.append(BLANKLINE)
self.body.append(r'\sphinxstrong{%s:}' % admonitionlabels['seealso'] + CR)
self.body.append(r'\nopagebreak' + BLANKLINE)
def depart_seealso(self, node: Element) -> None:
self.body.append(BLANKLINE)
    def visit_rubric(self, node: Element) -> None:
        """Render a rubric as an unnumbered subsubsection.

        A rubric whose whole text is "Footnotes" (or its translation) is
        dropped: LaTeX places footnotes itself.
        """
        if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
            raise nodes.SkipNode
        self.body.append(r'\subsubsection*{')
        self.context.append('}' + CR)
        self.in_title = 1
    def depart_rubric(self, node: Element) -> None:
        self.in_title = 0
        self.body.append(self.context.pop())
    def visit_footnote(self, node: Element) -> None:
        """Open a footnote environment labelled by the footnote's label node.

        Explicitly numbered footnotes additionally get \\sphinxstepexplicit
        and a \\label so they can be referenced by number.
        """
        self.in_footnote += 1
        label = cast(nodes.label, node[0])
        if 'auto' not in node:
            self.body.append(r'\sphinxstepexplicit ')
        if self.in_parsed_literal:
            self.body.append(r'\begin{footnote}[%s]' % label.astext())
        else:
            # '%' comments out the line break so no spurious space is emitted
            self.body.append('%' + CR)
            self.body.append(r'\begin{footnote}[%s]' % label.astext())
        if 'auto' not in node:
            self.body.append(r'\phantomsection'
                             r'\label{\thesphinxscope.%s}%%' % label.astext() + CR)
        self.body.append(r'\sphinxAtStartFootnote' + CR)
    def depart_footnote(self, node: Element) -> None:
        if self.in_parsed_literal:
            self.body.append(r'\end{footnote}')
        else:
            self.body.append('%' + CR)
            self.body.append(r'\end{footnote}')
        self.in_footnote -= 1
    def visit_label(self, node: Element) -> None:
        # labels are rendered by their parent (footnote/citation), not here
        raise nodes.SkipNode
    def visit_tabular_col_spec(self, node: Element) -> None:
        """Remember a tabularcolumns spec for the next table."""
        self.next_table_colspec = node['spec']
        raise nodes.SkipNode
    def visit_table(self, node: Element) -> None:
        """Open a table; one level of nesting is tolerated, deeper is not."""
        if len(self.tables) == 1:
            if self.table.get_table_type() == 'longtable':
                raise UnsupportedError(
                    '%s:%s: longtable does not support nesting a table.' %
                    (self.curfilestack[-1], node.line or ''))
            else:
                # change type of parent table to tabular
                # see https://groups.google.com/d/msg/sphinx-users/7m3NeOBixeo/9LKP2B4WBQAJ
                self.table.has_problematic = True
        elif len(self.tables) > 2:
            raise UnsupportedError(
                '%s:%s: deeply nested tables are not implemented.' %
                (self.curfilestack[-1], node.line or ''))
        self.tables.append(Table(node))
        if self.next_table_colspec:
            self.table.colspec = '{%s}' % self.next_table_colspec + CR
            if 'colwidths-given' in node.get('classes', []):
                logger.info(__('both tabularcolumns and :widths: option are given. '
                               ':widths: is ignored.'), location=node)
        # the colspec applies only to the very next table
        self.next_table_colspec = None
    def depart_table(self, node: Element) -> None:
        """Render the collected table through its LaTeX template and emit it."""
        labels = self.hypertarget_to(node)
        table_type = self.table.get_table_type()
        table = self.render(table_type + '.tex_t',
                            dict(table=self.table, labels=labels))
        self.body.append(BLANKLINE)
        self.body.append(table)
        self.body.append(CR)
        self.tables.pop()
    def visit_colspec(self, node: Element) -> None:
        """Record one column: count, optional width, and stub-column flag."""
        self.table.colcount += 1
        if 'colwidth' in node:
            self.table.colwidths.append(node['colwidth'])
        if 'stub' in node:
            self.table.stubs.append(self.table.colcount - 1)
    def depart_colspec(self, node: Element) -> None:
        pass
    def visit_tgroup(self, node: Element) -> None:
        pass
    def depart_tgroup(self, node: Element) -> None:
        pass
    def visit_thead(self, node: Element) -> None:
        """Redirect output into the table's header buffer."""
        # Redirect head output until header is finished.
        self.pushbody(self.table.header)
    def depart_thead(self, node: Element) -> None:
        self.popbody()
    def visit_tbody(self, node: Element) -> None:
        """Redirect output into the table's body buffer."""
        # Redirect body output until table is finished.
        self.pushbody(self.table.body)
    def depart_tbody(self, node: Element) -> None:
        self.popbody()
    def visit_row(self, node: Element) -> None:
        """Start a table row, emitting struts for cells continued from a multirow."""
        self.table.col = 0
        # fill columns if the row starts with the bottom of multirow cell
        while True:
            cell = self.table.cell(self.table.row, self.table.col)
            if cell is None:  # not a bottom of multirow cell
                break
            else:  # a bottom of multirow cell
                self.table.col += cell.width
                if cell.col:
                    self.body.append('&')
                if cell.width == 1:
                    # insert suitable strut for equalizing row heights in given multirow
                    self.body.append(r'\sphinxtablestrut{%d}' % cell.cell_id)
                else:  # use \multicolumn for wide multirow cell
                    self.body.append(r'\multicolumn{%d}{|l|}{\sphinxtablestrut{%d}}' %
                                     (cell.width, cell.cell_id))
    def depart_row(self, node: Element) -> None:
        """End the row; draw \\hline, or \\cline runs where multirows continue."""
        self.body.append(r'\\' + CR)
        cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
        underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
        if all(underlined):
            self.body.append(r'\hline')
        else:
            i = 0
            underlined.extend([False])  # sentinel
            while i < len(underlined):
                if underlined[i] is True:
                    # \cline uses 1-based, inclusive column numbers
                    j = underlined[i:].index(False)
                    self.body.append(r'\cline{%d-%d}' % (i + 1, i + j))
                    i += j
                i += 1
        self.table.row += 1
    def visit_entry(self, node: Element) -> None:
        """Open one table cell, handling multicolumn/multirow spans.

        ``context`` accumulates the closing markup (in reverse nesting
        order) and is pushed for depart_entry to pop.
        """
        if self.table.col > 0:
            self.body.append('&')
        self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
        cell = self.table.cell()
        context = ''
        if cell.width > 1:
            if self.config.latex_use_latex_multicolumn:
                # leftmost cell keeps its left rule, later cells don't
                if self.table.col == 0:
                    self.body.append(r'\multicolumn{%d}{|l|}{%%' % cell.width + CR)
                else:
                    self.body.append(r'\multicolumn{%d}{l|}{%%' % cell.width + CR)
                context = '}%' + CR
            else:
                self.body.append(r'\sphinxstartmulticolumn{%d}%%' % cell.width + CR)
                context = r'\sphinxstopmulticolumn' + CR
        if cell.height > 1:
            # \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
            self.body.append(r'\sphinxmultirow{%d}{%d}{%%' % (cell.height, cell.cell_id) + CR)
            context = '}%' + CR + context
        if cell.width > 1 or cell.height > 1:
            # spanning cells are wrapped in varwidth for line breaking
            self.body.append(r'\begin{varwidth}[t]{\sphinxcolwidth{%d}{%d}}'
                             % (cell.width, self.table.colcount) + CR)
            context = (r'\par' + CR + r'\vskip-\baselineskip'
                       r'\vbox{\hbox{\strut}}\end{varwidth}%' + CR + context)
            self.needs_linetrimming = 1
        if len(list(node.traverse(nodes.paragraph))) >= 2:
            self.table.has_oldproblematic = True
        if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
            # header and stub cells use the head font, unless empty
            if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
                pass
            else:
                self.body.append(r'\sphinxstyletheadfamily ')
        if self.needs_linetrimming:
            self.pushbody([])
        self.context.append(context)
    def depart_entry(self, node: Element) -> None:
        """Close the cell and fill following columns continued from multirows."""
        if self.needs_linetrimming:
            self.needs_linetrimming = 0
            body = self.popbody()
            # Remove empty lines from top of merged cell
            while body and body[0] == CR:
                body.pop(0)
            self.body.extend(body)
        self.body.append(self.context.pop())
        cell = self.table.cell()
        self.table.col += cell.width
        # fill columns if next ones are a bottom of wide-multirow cell
        while True:
            nextcell = self.table.cell()
            if nextcell is None:  # not a bottom of multirow cell
                break
            else:  # a bottom part of multirow cell
                self.table.col += nextcell.width
                self.body.append('&')
                if nextcell.width == 1:
                    # insert suitable strut for equalizing row heights in multirow
                    # they also serve to clear colour panels which would hide the text
                    self.body.append(r'\sphinxtablestrut{%d}' % nextcell.cell_id)
                else:
                    # use \multicolumn for wide multirow cell
                    self.body.append(r'\multicolumn{%d}{l|}{\sphinxtablestrut{%d}}' %
                                     (nextcell.width, nextcell.cell_id))
def visit_acks(self, node: Element) -> None:
# this is a list in the source, but should be rendered as a
# comma-separated list here
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append(BLANKLINE)
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append(BLANKLINE)
raise nodes.SkipNode
    def visit_bullet_list(self, node: Element) -> None:
        """Open an itemize list, unless inside an hlist (which opened one)."""
        if not self.compact_list:
            self.body.append(r'\begin{itemize}' + CR)
        if self.table:
            self.table.has_problematic = True
    def depart_bullet_list(self, node: Element) -> None:
        if not self.compact_list:
            self.body.append(r'\end{itemize}' + CR)
    def visit_enumerated_list(self, node: Element) -> None:
        """Open an enumerate list, configuring label style, nesting counter
        and optional start value."""
        def get_enumtype(node: Element) -> str:
            # enumtype names the counter style ('arabic', 'loweralpha', ...)
            enumtype = node.get('enumtype', 'arabic')
            if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
                # fallback to arabic if alphabet counter overflows
                enumtype = 'arabic'
            return enumtype
        def get_nested_level(node: Element) -> int:
            # 1-based depth of enumerated-list nesting (maps to LaTeX enumi..)
            if node is None:
                return 0
            elif isinstance(node, nodes.enumerated_list):
                return get_nested_level(node.parent) + 1
            else:
                return get_nested_level(node.parent)
        enum = "enum%s" % toRoman(get_nested_level(node)).lower()
        enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
        style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
        prefix = node.get('prefix', '')
        suffix = node.get('suffix', '.')
        self.body.append(r'\begin{enumerate}' + CR)
        self.body.append(r'\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%' %
                         (style, enum, enumnext, prefix, suffix) + CR)
        if 'start' in node:
            # LaTeX counter is set to start-1; \item increments before use
            self.body.append(r'\setcounter{%s}{%d}' % (enum, node['start'] - 1) + CR)
        if self.table:
            self.table.has_problematic = True
    def depart_enumerated_list(self, node: Element) -> None:
        self.body.append(r'\end{enumerate}' + CR)
    def visit_list_item(self, node: Element) -> None:
        """Emit one list item."""
        # Append "{}" in case the next character is "[", which would break
        # LaTeX's list environment (no numbering and the "[" is not printed).
        self.body.append(r'\item {} ')
    def depart_list_item(self, node: Element) -> None:
        self.body.append(CR)
    def visit_definition_list(self, node: Element) -> None:
        """Open a description environment for a definition list."""
        self.body.append(r'\begin{description}' + CR)
        if self.table:
            self.table.has_problematic = True
    def depart_definition_list(self, node: Element) -> None:
        self.body.append(r'\end{description}' + CR)
    def visit_definition_list_item(self, node: Element) -> None:
        pass
    def depart_definition_list_item(self, node: Element) -> None:
        pass
    def visit_term(self, node: Element) -> None:
        """Open a definition-list term, emitting hypertargets for its ids."""
        self.in_term += 1
        ctx = ''
        if node.get('ids'):
            ctx = r'\phantomsection'
            for node_id in node['ids']:
                ctx += self.hypertarget(node_id, anchor=False)
        # ctx closes the \sphinxlineitem group and is emitted in depart_term
        ctx += r'}'
        self.body.append(r'\sphinxlineitem{')
        self.context.append(ctx)
    def depart_term(self, node: Element) -> None:
        self.body.append(self.context.pop())
        self.in_term -= 1
    def visit_classifier(self, node: Element) -> None:
        """Render a term classifier in literal brackets."""
        self.body.append('{[}')
    def depart_classifier(self, node: Element) -> None:
        self.body.append('{]}')
    def visit_definition(self, node: Element) -> None:
        pass
    def depart_definition(self, node: Element) -> None:
        self.body.append(CR)
    def visit_field_list(self, node: Element) -> None:
        """Render a field list as an indented description environment."""
        self.body.append(r'\begin{quote}\begin{description}' + CR)
        if self.table:
            self.table.has_problematic = True
    def depart_field_list(self, node: Element) -> None:
        self.body.append(r'\end{description}\end{quote}' + CR)
    def visit_field(self, node: Element) -> None:
        pass
    def depart_field(self, node: Element) -> None:
        pass
    # field names/bodies reuse the definition-list term/definition visitors
    visit_field_name = visit_term
    depart_field_name = depart_term
    visit_field_body = visit_definition
    depart_field_body = depart_definition
    def visit_paragraph(self, node: Element) -> None:
        """Start a paragraph; what is emitted depends on its position."""
        index = node.parent.index(node)
        if (index > 0 and isinstance(node.parent, nodes.compound) and
                not isinstance(node.parent[index - 1], nodes.paragraph) and
                not isinstance(node.parent[index - 1], nodes.compound)):
            # insert blank line, if the paragraph follows a non-paragraph node in a compound
            self.body.append(r'\noindent' + CR)
        elif index == 1 and isinstance(node.parent, (nodes.footnote, footnotetext)):
            # don't insert blank line, if the paragraph is second child of a footnote
            # (first one is label node)
            pass
        else:
            # the \sphinxAtStartPar is to allow hyphenation of first word of
            # a paragraph in narrow contexts such as in a table cell
            # added as two items (cf. line trimming in depart_entry())
            self.body.extend([CR, r'\sphinxAtStartPar' + CR])
    def depart_paragraph(self, node: Element) -> None:
        self.body.append(CR)
    def visit_centered(self, node: Element) -> None:
        """Open a center environment for centered text."""
        self.body.append(CR + r'\begin{center}')
        if self.table:
            self.table.has_problematic = True
    def depart_centered(self, node: Element) -> None:
        self.body.append(CR + r'\end{center}')
    def visit_hlist(self, node: Element) -> None:
        """Open a horizontal list: multicols wrapping a compact itemize."""
        self.compact_list += 1
        # NOTE(review): node['ncolumns'] is concatenated directly, so it is
        # expected to be a string -- confirm against the hlist directive
        ncolumns = node['ncolumns']
        if self.compact_list > 1:
            self.body.append(r'\setlength{\multicolsep}{0pt}' + CR)
        self.body.append(r'\begin{multicols}{' + ncolumns + r'}\raggedright' + CR)
        self.body.append(r'\begin{itemize}\setlength{\itemsep}{0pt}'
                         r'\setlength{\parskip}{0pt}' + CR)
        if self.table:
            self.table.has_problematic = True
    def depart_hlist(self, node: Element) -> None:
        self.compact_list -= 1
        self.body.append(r'\end{itemize}\raggedcolumns\end{multicols}' + CR)
    def visit_hlistcol(self, node: Element) -> None:
        pass
    def depart_hlistcol(self, node: Element) -> None:
        # \columnbreak would guarantee same columns as in html output. But
        # some testing with long items showed that columns may be too uneven.
        # And in case only of short items, the automatic column breaks should
        # match the ones pre-computed by the hlist() directive.
        # self.body.append(r'\columnbreak\n')
        pass
    def latex_image_length(self, width_str: str, scale: int = 100) -> str:
        """Convert an RST dimension string to a LaTeX dimension.

        NOTE(review): despite the ``str`` annotation this returns ``None``
        when the unit is invalid -- callers must truth-test the result.
        """
        try:
            return rstdim_to_latexdim(width_str, scale)
        except ValueError:
            logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
            return None
    def is_inline(self, node: Element) -> bool:
        """Check whether a node represents an inline element."""
        return isinstance(node.parent, nodes.TextElement)
    def visit_image(self, node: Element) -> None:
        """Emit \\sphinxincludegraphics for an image node.

        Width/height/scale become includegraphics options; alignment,
        parsed-literal context and block placement contribute wrapper
        markup collected in ``pre``/``post``.
        """
        pre: List[str] = []  # in reverse order
        post: List[str] = []
        include_graphics_options = []
        has_hyperlink = isinstance(node.parent, nodes.reference)
        if has_hyperlink:
            is_inline = self.is_inline(node.parent)
        else:
            is_inline = self.is_inline(node)
        if 'width' in node:
            if 'scale' in node:
                w = self.latex_image_length(node['width'], node['scale'])
            else:
                w = self.latex_image_length(node['width'])
            if w:
                include_graphics_options.append('width=%s' % w)
        if 'height' in node:
            if 'scale' in node:
                h = self.latex_image_length(node['height'], node['scale'])
            else:
                h = self.latex_image_length(node['height'])
            if h:
                include_graphics_options.append('height=%s' % h)
        if 'scale' in node:
            if not include_graphics_options:
                # if no "width" nor "height", \sphinxincludegraphics will fit
                # to the available text width if oversized after rescaling.
                include_graphics_options.append('scale=%s'
                                                % (float(node['scale']) / 100.0))
        if 'align' in node:
            align_prepost = {
                # By default latex aligns the top of an image.
                (1, 'top'): ('', ''),
                (1, 'middle'): (r'\raisebox{-0.5\height}{', '}'),
                (1, 'bottom'): (r'\raisebox{-\height}{', '}'),
                (0, 'center'): (r'{\hspace*{\fill}', r'\hspace*{\fill}}'),
                # These 2 don't exactly do the right thing.  The image should
                # be floated alongside the paragraph.  See
                # https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
                (0, 'left'): ('{', r'\hspace*{\fill}}'),
                (0, 'right'): (r'{\hspace*{\fill}', '}'),
            }
            try:
                pre.append(align_prepost[is_inline, node['align']][0])
                post.append(align_prepost[is_inline, node['align']][1])
            except KeyError:
                pass
        if self.in_parsed_literal:
            pre.append(r'{\sphinxunactivateextrasandspace ')
            post.append('}')
        if not is_inline and not has_hyperlink:
            pre.append(CR + r'\noindent')
            post.append(CR)
        pre.reverse()
        if node['uri'] in self.builder.images:
            uri = self.builder.images[node['uri']]
        else:
            # missing image!
            if self.ignore_missing_images:
                return
            uri = node['uri']
        if uri.find('://') != -1:
            # ignore remote images
            return
        self.body.extend(pre)
        options = ''
        if include_graphics_options:
            options = '[%s]' % ','.join(include_graphics_options)
        base, ext = path.splitext(uri)
        if self.in_title and base:
            # Lowercase tokens forcely because some fncychap themes capitalize
            # the options of \sphinxincludegraphics unexpectedly (ex. WIDTH=...).
            self.body.append(r'\lowercase{\sphinxincludegraphics%s}{{%s}%s}' %
                             (options, base, ext))
        else:
            self.body.append(r'\sphinxincludegraphics%s{{%s}%s}' %
                             (options, base, ext))
        self.body.extend(post)
    def depart_image(self, node: Element) -> None:
        pass
    def visit_figure(self, node: Element) -> None:
        """Open the figure environment appropriate to the context:
        in-table figure, wrapfigure (left/right align), centered block
        inside a minipage, or a regular floating figure."""
        align = self.elements['figure_align']
        if self.no_latex_floats:
            # "H" (from float package style) forbids floating
            align = "H"
        if self.table:
            # TODO: support align option
            if 'width' in node:
                length = self.latex_image_length(node['width'])
                if length:
                    self.body.append(r'\begin{sphinxfigure-in-table}[%s]' % length + CR)
                    self.body.append(r'\centering' + CR)
            else:
                self.body.append(r'\begin{sphinxfigure-in-table}' + CR)
                self.body.append(r'\centering' + CR)
            if any(isinstance(child, nodes.caption) for child in node):
                self.body.append(r'\capstart')
            self.context.append(r'\end{sphinxfigure-in-table}\relax' + CR)
        elif node.get('align', '') in ('left', 'right'):
            length = None
            if 'width' in node:
                length = self.latex_image_length(node['width'])
            elif isinstance(node[0], nodes.image) and 'width' in node[0]:
                length = self.latex_image_length(node[0]['width'])
            self.body.append(BLANKLINE)  # Insert a blank line to prevent infinite loop
            # https://github.com/sphinx-doc/sphinx/issues/7059
            self.body.append(r'\begin{wrapfigure}{%s}{%s}' %
                             ('r' if node['align'] == 'right' else 'l', length or '0pt') + CR)
            self.body.append(r'\centering')
            self.context.append(r'\end{wrapfigure}' + CR)
        elif self.in_minipage:
            self.body.append(CR + r'\begin{center}')
            self.context.append(r'\end{center}' + CR)
        else:
            self.body.append(CR + r'\begin{figure}[%s]' % align + CR)
            self.body.append(r'\centering' + CR)
            if any(isinstance(child, nodes.caption) for child in node):
                self.body.append(r'\capstart' + CR)
            self.context.append(r'\end{figure}' + CR)
    def depart_figure(self, node: Element) -> None:
        self.body.append(self.context.pop())
    def visit_caption(self, node: Element) -> None:
        """Open the caption macro suited to the caption's parent context."""
        self.in_caption += 1
        if isinstance(node.parent, captioned_literal_block):
            self.body.append(r'\sphinxSetupCaptionForVerbatim{')
        elif self.in_minipage and isinstance(node.parent, nodes.figure):
            self.body.append(r'\captionof{figure}{')
        elif self.table and node.parent.tagname == 'figure':
            self.body.append(r'\sphinxfigcaption{')
        else:
            self.body.append(r'\caption{')
    def depart_caption(self, node: Element) -> None:
        self.body.append('}')
        if isinstance(node.parent, nodes.figure):
            # place figure labels right after the caption
            labels = self.hypertarget_to(node.parent)
            self.body.append(labels)
        self.in_caption -= 1
    def visit_legend(self, node: Element) -> None:
        """Open the legend block following a figure caption."""
        self.body.append(CR + r'\begin{sphinxlegend}')
    def depart_legend(self, node: Element) -> None:
        self.body.append(r'\end{sphinxlegend}' + CR)
    def visit_admonition(self, node: Element) -> None:
        """Open a generic admonition; floats are disallowed inside it."""
        self.body.append(CR + r'\begin{sphinxadmonition}{note}')
        self.no_latex_floats += 1
    def depart_admonition(self, node: Element) -> None:
        self.body.append(r'\end{sphinxadmonition}' + CR)
        self.no_latex_floats -= 1
    def _visit_named_admonition(self, node: Element) -> None:
        """Open a named admonition (note, warning, ...) with its localized label."""
        label = admonitionlabels[node.tagname]
        self.body.append(CR + r'\begin{sphinxadmonition}{%s}{%s:}' %
                         (node.tagname, label))
        self.no_latex_floats += 1
    def _depart_named_admonition(self, node: Element) -> None:
        self.body.append(r'\end{sphinxadmonition}' + CR)
        self.no_latex_floats -= 1
    # all concrete admonition types share the named-admonition visitors
    visit_attention = _visit_named_admonition
    depart_attention = _depart_named_admonition
    visit_caution = _visit_named_admonition
    depart_caution = _depart_named_admonition
    visit_danger = _visit_named_admonition
    depart_danger = _depart_named_admonition
    visit_error = _visit_named_admonition
    depart_error = _depart_named_admonition
    visit_hint = _visit_named_admonition
    depart_hint = _depart_named_admonition
    visit_important = _visit_named_admonition
    depart_important = _depart_named_admonition
    visit_note = _visit_named_admonition
    depart_note = _depart_named_admonition
    visit_tip = _visit_named_admonition
    depart_tip = _depart_named_admonition
    visit_warning = _visit_named_admonition
    depart_warning = _depart_named_admonition
    def visit_versionmodified(self, node: Element) -> None:
        pass
    def depart_versionmodified(self, node: Element) -> None:
        pass
    def visit_target(self, node: Element) -> None:
        """Emit hypertargets for a target node, unless a following node
        (or an enumerable node with a numfig title) will carry the link."""
        def add_target(id: str) -> None:
            # indexing uses standard LaTeX index markup, so the targets
            # will be generated differently
            if id.startswith('index-'):
                return
            # equations also need no extra blank line nor hypertarget
            # TODO: fix this dependency on mathbase extension internals
            if id.startswith('equation-'):
                return
            # insert blank line, if the target follows a paragraph node
            index = node.parent.index(node)
            if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
                self.body.append(CR)
            # do not generate \phantomsection in \section{}
            anchor = not self.in_title
            self.body.append(self.hypertarget(id, anchor=anchor))
        # skip if visitor for next node supports hyperlink
        next_node: Node = node
        while isinstance(next_node, nodes.target):
            next_node = next_node.next_node(ascend=True)
        domain = cast(StandardDomain, self.builder.env.get_domain('std'))
        if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
            return
        elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
            return
        if 'refuri' in node:
            return
        if 'anonymous' in node:
            return
        if node.get('refid'):
            prev_node = get_prev_node(node)
            if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
                # a target for a hyperlink reference having alias
                pass
            else:
                add_target(node['refid'])
        for id in node['ids']:
            add_target(id)
    def depart_target(self, node: Element) -> None:
        pass
def visit_attribution(self, node: Element) -> None:
self.body.append(CR + r'\begin{flushright}' + CR)
self.body.append('---')
def depart_attribution(self, node: Element) -> None:
self.body.append(CR + r'\end{flushright}' + CR)
    def visit_index(self, node: Element) -> None:
        """Emit \\index entries for an index node, then skip its children.

        ``escape`` protects makeindex metacharacters; ``style`` wraps the
        sort key's display form in \\spxentry/\\spxextra.
        """
        def escape(value: str) -> str:
            value = self.encode(value)
            value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
            value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
            value = value.replace('"', '""')
            value = value.replace('@', '"@')
            value = value.replace('!', '"!')
            value = value.replace('|', r'\textbar{}')
            return value
        def style(string: str) -> str:
            match = EXTRA_RE.match(string)
            if match:
                return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
            else:
                return r'\spxentry{%s}' % string
        if not node.get('inline', True):
            self.body.append(CR)
        entries = node['entries']
        for type, string, tid, ismain, key_ in entries:
            m = ''
            if ismain:
                # 'main' entries get the emphasized page-number style
                m = '|spxpagem'
            try:
                if type == 'single':
                    try:
                        p1, p2 = [escape(x) for x in split_into(2, 'single', string)]
                        P1, P2 = style(p1), style(p2)
                        self.body.append(r'\index{%s@%s!%s@%s%s}' % (p1, P1, p2, P2, m))
                    except ValueError:
                        p = escape(split_into(1, 'single', string)[0])
                        P = style(p)
                        self.body.append(r'\index{%s@%s%s}' % (p, P, m))
                elif type == 'pair':
                    # pairs are indexed under both orderings
                    p1, p2 = [escape(x) for x in split_into(2, 'pair', string)]
                    P1, P2 = style(p1), style(p2)
                    self.body.append(r'\index{%s@%s!%s@%s%s}\index{%s@%s!%s@%s%s}' %
                                     (p1, P1, p2, P2, m, p2, P2, p1, P1, m))
                elif type == 'triple':
                    # triples are indexed under all three rotations
                    p1, p2, p3 = [escape(x) for x in split_into(3, 'triple', string)]
                    P1, P2, P3 = style(p1), style(p2), style(p3)
                    self.body.append(
                        r'\index{%s@%s!%s %s@%s %s%s}'
                        r'\index{%s@%s!%s, %s@%s, %s%s}'
                        r'\index{%s@%s!%s %s@%s %s%s}' %
                        (p1, P1, p2, p3, P2, P3, m,
                         p2, P2, p3, p1, P3, P1, m,
                         p3, P3, p1, p2, P1, P2, m))
                elif type == 'see':
                    p1, p2 = [escape(x) for x in split_into(2, 'see', string)]
                    P1 = style(p1)
                    self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
                elif type == 'seealso':
                    p1, p2 = [escape(x) for x in split_into(2, 'seealso', string)]
                    P1 = style(p1)
                    self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
                else:
                    logger.warning(__('unknown index entry type %s found'), type)
            except ValueError as err:
                logger.warning(str(err))
        if not node.get('inline', True):
            self.body.append(r'\ignorespaces ')
        raise nodes.SkipNode
def visit_raw(self, node: Element) -> None:
if not self.is_inline(node):
self.body.append(CR)
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
if not self.is_inline(node):
self.body.append(CR)
raise nodes.SkipNode
    def visit_reference(self, node: Element) -> None:
        """Open hyperlink markup for a reference node.

        Internal references ('#'-/'%'-prefixed pseudo-URIs) become
        \\hyperref-based crossrefs (optionally with page numbers); external
        URIs become \\sphinxhref/\\sphinxurl.  The closing markup is pushed
        on ``self.context`` for depart_reference.
        """
        if not self.in_title:
            for id in node.get('ids'):
                anchor = not self.in_caption
                self.body += self.hypertarget(id, anchor=anchor)
        if not self.is_inline(node):
            self.body.append(CR)
        uri = node.get('refuri', '')
        if not uri and node.get('refid'):
            # same-document reference: encode as '%<docname>#<refid>'
            uri = '%' + self.curfilestack[-1] + '#' + node['refid']
        if self.in_title or not uri:
            self.context.append('')
        elif uri.startswith('#'):
            # references to labels in the same document
            id = self.curfilestack[-1] + ':' + uri[1:]
            self.body.append(self.hyperlink(id))
            self.body.append(r'\emph{')
            if self.config.latex_show_pagerefs and not \
                    self.in_production_list:
                self.context.append('}}} (%s)' % self.hyperpageref(id))
            else:
                self.context.append('}}}')
        elif uri.startswith('%'):
            # references to documents or labels inside documents
            hashindex = uri.find('#')
            if hashindex == -1:
                # reference to the document
                id = uri[1:] + '::doc'
            else:
                # reference to a label
                id = uri[1:].replace('#', ':')
            self.body.append(self.hyperlink(id))
            if (len(node) and
                    isinstance(node[0], nodes.Element) and
                    'std-term' in node[0].get('classes', [])):
                # don't add a pageref for glossary terms
                self.context.append('}}}')
                # mark up as termreference
                self.body.append(r'\sphinxtermref{')
            else:
                self.body.append(r'\sphinxcrossref{')
                if self.config.latex_show_pagerefs and not self.in_production_list:
                    self.context.append('}}} (%s)' % self.hyperpageref(id))
                else:
                    self.context.append('}}}')
        else:
            if len(node) == 1 and uri == node[0]:
                # the link text is the URI itself: use \sphinxurl
                if node.get('nolinkurl'):
                    self.body.append(r'\sphinxnolinkurl{%s}' % self.encode_uri(uri))
                else:
                    self.body.append(r'\sphinxurl{%s}' % self.encode_uri(uri))
                raise nodes.SkipNode
            else:
                self.body.append(r'\sphinxhref{%s}{' % self.encode_uri(uri))
                self.context.append('}')
    def depart_reference(self, node: Element) -> None:
        self.body.append(self.context.pop())
        if not self.is_inline(node):
            self.body.append(CR)
    def visit_number_reference(self, node: Element) -> None:
        """Render a numbered crossreference (numfig) as \\hyperref."""
        if node.get('refid'):
            id = self.curfilestack[-1] + ':' + node['refid']
        else:
            id = node.get('refuri', '')[1:].replace('#', ':')
        title = self.escape(node.get('title', '%s')).replace(r'\%s', '%s')
        if r'\{name\}' in title or r'\{number\}' in title:
            # new style format (cf. "Fig.%{number}")
            title = title.replace(r'\{name\}', '{name}').replace(r'\{number\}', '{number}')
            text = escape_abbr(title).format(name=r'\nameref{%s}' % self.idescape(id),
                                             number=r'\ref{%s}' % self.idescape(id))
        else:
            # old style format (cf. "Fig.%{number}")
            text = escape_abbr(title) % (r'\ref{%s}' % self.idescape(id))
        hyperref = r'\hyperref[%s]{%s}' % (self.idescape(id), text)
        self.body.append(hyperref)
        raise nodes.SkipNode
    def visit_download_reference(self, node: Element) -> None:
        """Downloads have no LaTeX rendering; children pass through."""
        pass
    def depart_download_reference(self, node: Element) -> None:
        pass
    def visit_pending_xref(self, node: Element) -> None:
        # unresolved crossreferences render as their child text only
        pass
    def depart_pending_xref(self, node: Element) -> None:
        pass
    def visit_emphasis(self, node: Element) -> None:
        """Open italic emphasis."""
        self.body.append(r'\sphinxstyleemphasis{')
    def depart_emphasis(self, node: Element) -> None:
        self.body.append('}')
    def visit_literal_emphasis(self, node: Element) -> None:
        """Open emphasized code-style text."""
        self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')
    def depart_literal_emphasis(self, node: Element) -> None:
        self.body.append('}}')
    def visit_strong(self, node: Element) -> None:
        """Open bold emphasis."""
        self.body.append(r'\sphinxstylestrong{')
    def depart_strong(self, node: Element) -> None:
        self.body.append('}')
    def visit_literal_strong(self, node: Element) -> None:
        """Open bold code-style text."""
        self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')
    def depart_literal_strong(self, node: Element) -> None:
        self.body.append('}}')
    def visit_abbreviation(self, node: Element) -> None:
        """Render an abbreviation, appending its explanation only on
        first occurrence (tracked in self.handled_abbrs)."""
        abbr = node.astext()
        self.body.append(r'\sphinxstyleabbreviation{')
        # spell out the explanation once
        if node.hasattr('explanation') and abbr not in self.handled_abbrs:
            self.context.append('} (%s)' % self.encode(node['explanation']))
            self.handled_abbrs.add(abbr)
        else:
            self.context.append('}')
    def depart_abbreviation(self, node: Element) -> None:
        self.body.append(self.context.pop())
    def visit_manpage(self, node: Element) -> None:
        """Man-page references render like literal emphasis."""
        return self.visit_literal_emphasis(node)
    def depart_manpage(self, node: Element) -> None:
        return self.depart_literal_emphasis(node)
    def visit_title_reference(self, node: Element) -> None:
        """Open markup for a cited title."""
        self.body.append(r'\sphinxtitleref{')
    def depart_title_reference(self, node: Element) -> None:
        self.body.append('}')
    def visit_thebibliography(self, node: Element) -> None:
        """Open the bibliography, sized by the longest citation label."""
        citations = cast(Iterable[nodes.citation], node)
        labels = (cast(nodes.label, citation[0]) for citation in citations)
        longest_label = max((label.astext() for label in labels), key=len)
        if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
            # adjust max width of citation labels not to break the layout
            longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
        self.body.append(CR + r'\begin{sphinxthebibliography}{%s}' %
                         self.encode(longest_label) + CR)
    def depart_thebibliography(self, node: Element) -> None:
        self.body.append(r'\end{sphinxthebibliography}' + CR)
    def visit_citation(self, node: Element) -> None:
        """Emit one \\bibitem keyed by "<docname>:<first id>"."""
        label = cast(nodes.label, node[0])
        self.body.append(r'\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
                                                   node['docname'], node['ids'][0]))
    def depart_citation(self, node: Element) -> None:
        pass
    def visit_citation_reference(self, node: Element) -> None:
        """Emit \\sphinxcite for a citation reference (dropped in titles)."""
        if self.in_title:
            pass
        else:
            self.body.append(r'\sphinxcite{%s:%s}' % (node['docname'], node['refname']))
            raise nodes.SkipNode
    def depart_citation_reference(self, node: Element) -> None:
        pass
    def visit_literal(self, node: Element) -> None:
        """Open inline code markup; titles and :kbd: get special macros."""
        if self.in_title:
            self.body.append(r'\sphinxstyleliteralintitle{\sphinxupquote{')
        elif 'kbd' in node['classes']:
            self.body.append(r'\sphinxkeyboard{\sphinxupquote{')
        else:
            self.body.append(r'\sphinxcode{\sphinxupquote{')
    def depart_literal(self, node: Element) -> None:
        self.body.append('}}')
    def visit_footnote_reference(self, node: Element) -> None:
        # footnote references are replaced by footnotemark/footnotetext nodes
        raise nodes.SkipNode
    def visit_footnotemark(self, node: Element) -> None:
        """Open \\sphinxfootnotemark with the mark number as its argument."""
        self.body.append(r'\sphinxfootnotemark[')
    def depart_footnotemark(self, node: Element) -> None:
        self.body.append(']')
    def visit_footnotetext(self, node: Element) -> None:
        """Open the footnotetext environment for a deferred footnote body."""
        label = cast(nodes.label, node[0])
        self.body.append('%' + CR)
        self.body.append(r'\begin{footnotetext}[%s]'
                         r'\phantomsection\label{\thesphinxscope.%s}%%'
                         % (label.astext(), label.astext()) + CR)
        self.body.append(r'\sphinxAtStartFootnote' + CR)
    def depart_footnotetext(self, node: Element) -> None:
        # the \ignorespaces in particular for after table header use
        self.body.append('%' + CR)
        self.body.append(r'\end{footnotetext}\ignorespaces ')
    def visit_captioned_literal_block(self, node: Element) -> None:
        # wrapper node; the caption and literal_block children do the work
        pass
    def depart_captioned_literal_block(self, node: Element) -> None:
        pass
    def visit_literal_block(self, node: Element) -> None:
        """Emit a code block.

        Parsed-literal blocks (rawsource differs from text) open an
        unhighlighted sphinxalltt environment and fall through to the
        children; real code blocks are pygmentized here and SkipNode is
        raised.
        """
        if node.rawsource != node.astext():
            # most probably a parsed-literal block -- don't highlight
            self.in_parsed_literal += 1
            self.body.append(r'\begin{sphinxalltt}' + CR)
        else:
            labels = self.hypertarget_to(node)
            if isinstance(node.parent, captioned_literal_block):
                labels += self.hypertarget_to(node.parent)
            if labels and not self.in_footnote:
                self.body.append(CR + r'\def\sphinxLiteralBlockLabel{' + labels + '}')
            lang = node.get('language', 'default')
            linenos = node.get('linenos', False)
            highlight_args = node.get('highlight_args', {})
            highlight_args['force'] = node.get('force', False)
            opts = self.config.highlight_options.get(lang, {})
            hlcode = self.highlighter.highlight_block(
                node.rawsource, lang, opts=opts, linenos=linenos,
                location=node, **highlight_args
            )
            if self.in_footnote:
                self.body.append(CR + r'\sphinxSetupCodeBlockInFootnote')
                hlcode = hlcode.replace(r'\begin{Verbatim}',
                                        r'\begin{sphinxVerbatim}')
            # if in table raise verbatim flag to avoid "tabulary" environment
            # and opt for sphinxVerbatimintable to handle caption & long lines
            elif self.table:
                self.table.has_problematic = True
                self.table.has_verbatim = True
                hlcode = hlcode.replace(r'\begin{Verbatim}',
                                        r'\begin{sphinxVerbatimintable}')
            else:
                hlcode = hlcode.replace(r'\begin{Verbatim}',
                                        r'\begin{sphinxVerbatim}')
            # get consistent trailer
            hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim}
            if self.table and not self.in_footnote:
                hlcode += r'\end{sphinxVerbatimintable}'
            else:
                hlcode += r'\end{sphinxVerbatim}'
            hllines = str(highlight_args.get('hl_lines', []))[1:-1]
            if hllines:
                self.body.append(CR + r'\fvset{hllines={, %s,}}%%' % hllines)
            self.body.append(CR + hlcode + CR)
            if hllines:
                self.body.append(r'\sphinxresetverbatimhllines' + CR)
            raise nodes.SkipNode
    def depart_literal_block(self, node: Element) -> None:
        # only reached for the parsed-literal branch of visit_literal_block
        self.body.append(CR + r'\end{sphinxalltt}' + CR)
        self.in_parsed_literal -= 1
    visit_doctest_block = visit_literal_block
    depart_doctest_block = depart_literal_block
def visit_line(self, node: Element) -> None:
self.body.append(r'\item[] ')
def depart_line(self, node: Element) -> None:
self.body.append(CR)
def visit_line_block(self, node: Element) -> None:
if isinstance(node.parent, nodes.line_block):
self.body.append(r'\item[]' + CR)
self.body.append(r'\begin{DUlineblock}{\DUlineblockindent}' + CR)
else:
self.body.append(CR + r'\begin{DUlineblock}{0em}' + CR)
if self.table:
self.table.has_problematic = True
def depart_line_block(self, node: Element) -> None:
self.body.append(r'\end{DUlineblock}' + CR)
def visit_block_quote(self, node: Element) -> None:
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append(r'\begin{quote}' + CR)
if self.table:
self.table.has_problematic = True
def depart_block_quote(self, node: Element) -> None:
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append(r'\end{quote}' + CR)
# option node handling copied from docutils' latex writer
def visit_option(self, node: Element) -> None:
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node: Element) -> None:
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node: Element) -> None:
"""The delimiter between an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node: Element) -> None:
pass
def visit_option_group(self, node: Element) -> None:
self.body.append(r'\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node: Element) -> None:
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node: Element) -> None:
self.body.append(r'\begin{optionlist}{3cm}' + CR)
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node: Element) -> None:
self.body.append(r'\end{optionlist}' + CR)
def visit_option_list_item(self, node: Element) -> None:
pass
def depart_option_list_item(self, node: Element) -> None:
pass
def visit_option_string(self, node: Element) -> None:
ostring = node.astext()
self.body.append(self.encode(ostring))
raise nodes.SkipNode
def visit_description(self, node: Element) -> None:
self.body.append(' ')
def depart_description(self, node: Element) -> None:
pass
def visit_superscript(self, node: Element) -> None:
self.body.append(r'$^{\text{')
def depart_superscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_subscript(self, node: Element) -> None:
self.body.append(r'$_{\text{')
def depart_subscript(self, node: Element) -> None:
self.body.append('}}$')
def visit_inline(self, node: Element) -> None:
classes = node.get('classes', [])
if classes in [['menuselection']]:
self.body.append(r'\sphinxmenuselection{')
self.context.append('}')
elif classes in [['guilabel']]:
self.body.append(r'\sphinxguilabel{')
self.context.append('}')
elif classes in [['accelerator']]:
self.body.append(r'\sphinxaccelerator{')
self.context.append('}')
elif classes and not self.in_title:
self.body.append(r'\DUrole{%s}{' % ','.join(classes))
self.context.append('}')
else:
self.context.append('')
def depart_inline(self, node: Element) -> None:
self.body.append(self.context.pop())
def visit_generated(self, node: Element) -> None:
pass
def depart_generated(self, node: Element) -> None:
pass
def visit_compound(self, node: Element) -> None:
pass
def depart_compound(self, node: Element) -> None:
pass
def visit_container(self, node: Element) -> None:
classes = node.get('classes', [])
for c in classes:
self.body.append('\n\\begin{sphinxuseclass}{%s}' % c)
def depart_container(self, node: Element) -> None:
classes = node.get('classes', [])
for c in classes:
self.body.append('\n\\end{sphinxuseclass}')
def visit_decoration(self, node: Element) -> None:
pass
def depart_decoration(self, node: Element) -> None:
pass
# docutils-generated elements that we don't support
def visit_header(self, node: Element) -> None:
raise nodes.SkipNode
def visit_footer(self, node: Element) -> None:
raise nodes.SkipNode
def visit_docinfo(self, node: Element) -> None:
raise nodes.SkipNode
# text handling
def encode(self, text: str) -> str:
text = self.escape(text)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace(CR, r'~\\' + CR).replace(' ', '~')
return text
def encode_uri(self, text: str) -> str:
# TODO: it is probably wrong that this uses texescape.escape()
# this must be checked against hyperref package exact dealings
# mainly, %, #, {, } and \ need escaping via a \ escape
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace(r'\textasciitilde{}', '~').\
replace(r'\sphinxhyphen{}', '-').\
replace(r'\textquotesingle{}', "'")
def visit_Text(self, node: Text) -> None:
text = self.encode(node.astext())
self.body.append(text)
def depart_Text(self, node: Text) -> None:
pass
def visit_comment(self, node: Element) -> None:
raise nodes.SkipNode
def visit_meta(self, node: Element) -> None:
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node: Element) -> None:
pass
def depart_system_message(self, node: Element) -> None:
self.body.append(CR)
def visit_math(self, node: Element) -> None:
if self.in_title:
self.body.append(r'\protect\(%s\protect\)' % node.astext())
else:
self.body.append(r'\(%s\)' % node.astext())
raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
if node.get('label'):
label = "equation:%s:%s" % (node['docname'], node['label'])
else:
label = None
if node.get('nowrap'):
if label:
self.body.append(r'\label{%s}' % label)
self.body.append(node.astext())
else:
from sphinx.util.math import wrap_displaymath
self.body.append(wrap_displaymath(node.astext(), label,
self.config.math_number_all))
raise nodes.SkipNode
    def visit_math_reference(self, node: Element) -> None:
        """Emit a cross-reference to a numbered equation."""
        label = "equation:%s:%s" % (node['docname'], node['target'])
        eqref_format = self.config.math_eqref_format
        if eqref_format:
            try:
                ref = r'\ref{%s}' % label
                self.body.append(eqref_format.format(number=ref))
            except KeyError as exc:
                # the user template referenced an unknown field; warn and
                # fall back to the plain \eqref form
                logger.warning(__('Invalid math_eqref_format: %r'), exc,
                               location=node)
                self.body.append(r'\eqref{%s}' % label)
        else:
            self.body.append(r'\eqref{%s}' % label)

    def depart_math_reference(self, node: Element) -> None:
        pass
def unknown_visit(self, node: Node) -> None:
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
@property
def docclasses(self) -> Tuple[str, str]:
"""Prepends prefix to sphinx document classes"""
warnings.warn('LaTeXWriter.docclasses() is deprecated.',
RemovedInSphinx70Warning, stacklevel=2)
return ('howto', 'manual')
# FIXME: Workaround to avoid circular import
# refs: https://github.com/sphinx-doc/sphinx/issues/5433
from sphinx.builders.latex.nodes import ( # NOQA isort:skip
HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext,
)
| 39.550414 | 95 | 0.567841 |
import re
import warnings
from collections import defaultdict
from os import path
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Set, Tuple, cast
from docutils import nodes, writers
from docutils.nodes import Element, Node, Text
from sphinx import addnodes, highlighting
from sphinx.deprecation import RemovedInSphinx70Warning
from sphinx.domains import IndexEntry
from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import _, __, admonitionlabels
from sphinx.util import logging, split_into, texescape
from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import clean_astext, get_prev_node
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_replace_map
try:
from docutils.utils.roman import toRoman
except ImportError:
from roman import toRoman
if TYPE_CHECKING:
from sphinx.builders.latex import LaTeXBuilder
from sphinx.builders.latex.theming import Theme
logger = logging.getLogger(__name__)
# citation labels longer than this get shortened in the LaTeX output
MAX_CITATION_LABEL_LENGTH = 8
# LaTeX sectioning commands, outermost first
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
                     "subsubsection", "paragraph", "subparagraph"]
# map docutils enumeration types to LaTeX counter styles;
# unknown types fall back to arabic numbering
ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
                                   {
                                       'arabic': r'\arabic',
                                       'loweralpha': r'\alph',
                                       'upperalpha': r'\Alph',
                                       'lowerroman': r'\roman',
                                       'upperroman': r'\Roman',
                                   })
# frequently used output snippets
CR = '\n'
BLANKLINE = '\n\n'
# splits "text (extra)" into the text and the parenthesized extra part
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
class collected_footnote(nodes.footnote):
    """Footnotes that are collected are assigned this class."""
class UnsupportedError(SphinxError):
    """Raised for markup constructs that cannot be expressed in LaTeX."""
    category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
    """Docutils writer producing LaTeX via :class:`LaTeXTranslator`."""
    supported = ('sphinxlatex',)
    settings_spec = ('LaTeX writer options', '', (
        ('Document name', ['--docname'], {'default': ''}),
        ('Document class', ['--docclass'], {'default': 'manual'}),
        ('Author', ['--author'], {'default': ''}),
    ))
    settings_defaults: Dict = {}
    output = None
    def __init__(self, builder: "LaTeXBuilder") -> None:
        """Store the LaTeX builder; *theme* is set later by the builder."""
        super().__init__()
        self.builder = builder
        self.theme: Theme = None
    def translate(self) -> None:
        """Walk the document with the translator and collect its output."""
        visitor = self.builder.create_translator(self.document, self.builder, self.theme)
        self.document.walkabout(visitor)
        self.output = cast(LaTeXTranslator, visitor).astext()
class Table:
    """Accumulates everything needed to render one LaTeX table."""

    def __init__(self, node: Element) -> None:
        self.header: List[str] = []
        self.body: List[str] = []
        self.align = node.get('align', 'default')
        self.classes: List[str] = node.get('classes', [])

        # column bookkeeping
        self.colcount = 0
        self.colspec: str = None
        self.colwidths: List[int] = []
        self.stubs: List[int] = []

        # flags that decide which LaTeX environment can be used
        self.has_problematic = False
        self.has_oldproblematic = False
        self.has_verbatim = False

        self.caption: List[str] = None

        # cursor and cell-span tracking while walking the table body
        self.col = 0
        self.row = 0
        self.cells: Dict[Tuple[int, int], int] = defaultdict(int)
        self.cell_id = 0

    def is_longtable(self) -> bool:
        """Tables that are tall or explicitly classed use longtable."""
        return self.row > 30 or 'longtable' in self.classes

    def get_table_type(self) -> str:
        """Pick the LaTeX environment used to render this table."""
        if self.is_longtable():
            return 'longtable'
        if self.has_verbatim:
            return 'tabular'
        if self.colspec:
            return 'tabulary'
        if self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
            return 'tabular'
        return 'tabulary'

    def get_colspec(self) -> str:
        """Build the LaTeX column specification for this table."""
        if self.colspec:
            # an explicit tabularcolumns directive wins
            return self.colspec
        if self.colwidths and 'colwidths-given' in self.classes:
            total = sum(self.colwidths)
            specs = [r'\X{%d}{%d}' % (width, total) for width in self.colwidths]
            return '{|%s|}' % '|'.join(specs) + CR
        if self.has_problematic:
            return r'{|*{%d}{\X{1}{%d}|}}' % (self.colcount, self.colcount) + CR
        if self.get_table_type() == 'tabulary':
            return '{|' + ('T|' * self.colcount) + '}' + CR
        if self.has_oldproblematic:
            return r'{|*{%d}{\X{1}{%d}|}}' % (self.colcount, self.colcount) + CR
        return '{|' + ('l|' * self.colcount) + '}' + CR

    def add_cell(self, height: int, width: int) -> None:
        """Reserve a height x width region for a new cell at the cursor."""
        self.cell_id += 1
        for row in range(self.row, self.row + height):
            for col in range(self.col, self.col + width):
                assert self.cells[(row, col)] == 0
                self.cells[(row, col)] = self.cell_id

    def cell(self, row: int = None, col: int = None) -> "TableCell":
        """Return the cell at (row, col), defaulting to the cursor.

        Returns None when no cell was registered at that position.
        """
        if row is None:
            row = self.row
        if col is None:
            col = self.col
        try:
            return TableCell(self, row, col)
        except IndexError:
            return None
class TableCell:
    """Resolves a (row, col) position to the spanning cell covering it."""

    def __init__(self, table: Table, row: int, col: int) -> None:
        cell_id = table.cells[(row, col)]
        if cell_id == 0:
            # no cell registered at this position
            raise IndexError
        self.table = table
        self.cell_id = cell_id
        self.row = row
        self.col = col
        # walk up and left to the top-left corner of the spanned region
        while table.cells[(self.row - 1, self.col)] == self.cell_id:
            self.row -= 1
        while table.cells[(self.row, self.col - 1)] == self.cell_id:
            self.col -= 1

    @property
    def width(self) -> int:
        """Number of columns this cell spans."""
        span = 0
        while self.table.cells[(self.row, self.col + span)] == self.cell_id:
            span += 1
        return span

    @property
    def height(self) -> int:
        """Number of rows this cell spans."""
        span = 0
        while self.table.cells[(self.row + span, self.col)] == self.cell_id:
            span += 1
        return span
def escape_abbr(text: str) -> str:
    """Adjust spacing after abbreviations: append \\@ to every period that
    ends a word (i.e. is followed by whitespace or the end of the string)."""
    word_final_dot = r'\.(?=\s|$)'
    return re.sub(word_final_dot, r'.\@', text)
def rstdim_to_latexdim(width_str: str, scale: int = 100) -> str:
    """Convert a reST dimension (e.g. '640px', '50%', '3cm') into LaTeX.

    With ``scale == 100`` known units pass straight through (px becomes
    \\sphinxpxdimen, pt becomes bp, %% becomes a fraction of \\linewidth);
    otherwise the amount is multiplied by scale/100 first.
    Raises ValueError when *width_str* cannot be parsed.
    """
    match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
    if not match:
        raise ValueError
    amount, unit = match.groups()[:2]
    if scale == 100:
        float(amount)  # validate that a numeric amount is present
        if unit in ('', "px"):
            return r"%s\sphinxpxdimen" % amount
        if unit == 'pt':
            return '%sbp' % amount  # TeX "pt" differs; use big points
        if unit == "%":
            return r"%.3f\linewidth" % (float(amount) / 100.0)
        return width_str
    scaled = float(amount) * scale / 100.0
    if unit in ('', "px"):
        return r"%.5f\sphinxpxdimen" % scaled
    if unit == 'pt':
        return '%.5fbp' % scaled
    if unit == "%":
        return r"%.5f\linewidth" % (scaled / 100.0)
    return "%.5f%s" % (scaled, unit)
class LaTeXTranslator(SphinxTranslator):
builder: "LaTeXBuilder" = None
secnumdepth = 2
ignore_missing_images = False
def __init__(self, document: nodes.document, builder: "LaTeXBuilder",
theme: "Theme") -> None:
super().__init__(document, builder)
self.body: List[str] = []
self.theme = theme
self.in_title = 0
self.in_production_list = 0
self.in_footnote = 0
self.in_caption = 0
self.in_term = 0
self.needs_linetrimming = 0
self.in_minipage = 0
self.no_latex_floats = 0
self.first_document = 1
self.this_is_the_title = 1
self.literal_whitespace = 0
self.in_parsed_literal = 0
self.compact_list = 0
self.first_param = 0
sphinxpkgoptions = []
self.elements = self.builder.context.copy()
self.sectionnames = LATEXSECTIONNAMES[:]
if self.theme.toplevel_sectioning == 'section':
self.sectionnames.remove('chapter')
self.top_sectionlevel = 1
if self.config.latex_toplevel_sectioning:
try:
self.top_sectionlevel = \
self.sectionnames.index(self.config.latex_toplevel_sectioning)
except ValueError:
logger.warning(__('unknown %r toplevel_sectioning for class %r') %
(self.config.latex_toplevel_sectioning, self.theme.docclass))
if self.config.numfig:
self.numfig_secnum_depth = self.config.numfig_secnum_depth
if self.numfig_secnum_depth > 0:
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
self.numfig_secnum_depth += self.top_sectionlevel
else:
self.numfig_secnum_depth += self.top_sectionlevel - 1
self.numfig_secnum_depth = min(self.numfig_secnum_depth,
len(LATEXSECTIONNAMES) - 1)
# if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
sphinxpkgoptions.append('numfigreset=%s' % self.numfig_secnum_depth)
else:
sphinxpkgoptions.append('nonumfigreset')
if self.config.numfig and self.config.math_numfig:
sphinxpkgoptions.append('mathnumfig')
if (self.config.language not in {None, 'en', 'ja'} and
'fncychap' not in self.config.latex_elements):
# use Sonny style if any language specified (except English)
self.elements['fncychap'] = (r'\usepackage[Sonny]{fncychap}' + CR +
r'\ChNameVar{\Large\normalfont\sffamily}' + CR +
r'\ChTitleVar{\Large\normalfont\sffamily}')
self.babel = self.builder.babel
if self.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
self.config.language)
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
tocdepth = self.document.get('tocdepth', 999) + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning(__('too large :maxdepth:, ignored.'))
tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = r'\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = r'\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
contentsname = document.get('contentsname')
if contentsname:
self.elements['contentsname'] = self.babel_renewcommand(r'\contentsname',
contentsname)
if self.elements['maxlistdepth']:
sphinxpkgoptions.append('maxlistdepth=%s' % self.elements['maxlistdepth'])
if sphinxpkgoptions:
self.elements['sphinxpkgoptions'] = '[,%s]' % ','.join(sphinxpkgoptions)
if self.elements['sphinxsetup']:
self.elements['sphinxsetup'] = (r'\sphinxsetup{%s}' % self.elements['sphinxsetup'])
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style,
latex_engine=self.config.latex_engine)
self.context: List[Any] = []
self.descstack: List[str] = []
self.tables: List[Table] = []
self.next_table_colspec: str = None
self.bodystack: List[List[str]] = []
self.footnote_restricted: Element = None
self.pending_footnotes: List[nodes.footnote_reference] = []
self.curfilestack: List[str] = []
self.handled_abbrs: Set[str] = set()
def pushbody(self, newbody: List[str]) -> None:
self.bodystack.append(self.body)
self.body = newbody
def popbody(self) -> List[str]:
body = self.body
self.body = self.bodystack.pop()
return body
def astext(self) -> str:
self.elements.update({
'body': ''.join(self.body),
'indices': self.generate_indices()
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id: str, withdoc: bool = True, anchor: bool = True) -> str:
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (r'\phantomsection' if anchor else '') + r'\label{%s}' % self.idescape(id)
def hypertarget_to(self, node: Element, anchor: bool = False) -> str:
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
else:
return labels
def hyperlink(self, id: str) -> str:
return r'{\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id: str) -> str:
return r'\autopageref*{%s}' % self.idescape(id)
def escape(self, s: str) -> str:
return texescape.escape(s, self.config.latex_engine)
def idescape(self, id: str) -> str:
return r'\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command: str, definition: str) -> str:
if self.elements['multilingual']:
prefix = r'\addto\captions%s{' % self.babel.get_language()
suffix = '}'
else: # babel is disabled (mainly for Japanese environment)
prefix = ''
suffix = ''
return r'%s\renewcommand{%s}{%s}%s' % (prefix, command, definition, suffix) + CR
def generate_indices(self) -> str:
def generate(content: List[Tuple[str, List[IndexEntry]]], collapsed: bool) -> None:
ret.append(r'\begin{sphinxtheindex}' + CR)
ret.append(r'\let\bigletter\sphinxstyleindexlettergroup' + CR)
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append(r'\indexspace' + CR)
ret.append(r'\bigletter{%s}' % self.escape(letter) + CR)
for entry in entries:
if not entry[3]:
continue
ret.append(r'\item\relax\sphinxstyleindexentry{%s}' %
self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append(r'\sphinxstyleindexextra{%s}' % self.encode(entry[4]))
ret.append(r'\sphinxstyleindexpageref{%s:%s}' %
(entry[2], self.idescape(entry[3])) + CR)
ret.append(r'\end{sphinxtheindex}' + CR)
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append(r'\renewcommand{\indexname}{%s}' % indexcls.localname + CR)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name: str, variables: Dict) -> str:
renderer = LaTeXRenderer(latex_engine=self.config.latex_engine)
for template_dir in self.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
if path.exists(template):
return renderer.render(template, variables)
return renderer.render(template_name, variables)
@property
def table(self) -> Table:
if self.tables:
return self.tables[-1]
else:
return None
def visit_document(self, node: Element) -> None:
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append(CR + r'\appendix' + CR)
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node: Element) -> None:
pass
def visit_start_of_file(self, node: Element) -> None:
self.curfilestack.append(node['docname'])
def depart_start_of_file(self, node: Element) -> None:
self.curfilestack.pop()
def visit_section(self, node: Element) -> None:
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append(BLANKLINE)
def depart_section(self, node: Element) -> None:
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node: Element) -> None:
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node: Element) -> None:
self.body.append('}')
def visit_topic(self, node: Element) -> None:
self.in_minipage = 1
self.body.append(CR + r'\begin{sphinxShadowBox}' + CR)
def depart_topic(self, node: Element) -> None:
self.in_minipage = 0
self.body.append(r'\end{sphinxShadowBox}' + CR)
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node: Element) -> None:
pass
def depart_glossary(self, node: Element) -> None:
pass
def visit_productionlist(self, node: Element) -> None:
self.body.append(BLANKLINE)
self.body.append(r'\begin{productionlist}' + CR)
self.in_production_list = 1
def depart_productionlist(self, node: Element) -> None:
self.body.append(r'\end{productionlist}' + BLANKLINE)
self.in_production_list = 0
def visit_production(self, node: Element) -> None:
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append(r'\production{%s}{' % self.encode(tn))
else:
self.body.append(r'\productioncont{')
def depart_production(self, node: Element) -> None:
self.body.append('}' + CR)
def visit_transition(self, node: Element) -> None:
self.body.append(self.elements['transition'])
def depart_transition(self, node: Element) -> None:
pass
    def visit_title(self, node: Element) -> None:
        """Open the LaTeX construct appropriate for this title's parent."""
        parent = node.parent
        if isinstance(parent, addnodes.seealso):
            # the seealso environment already renders its own heading
            raise nodes.SkipNode
        elif isinstance(parent, nodes.section):
            if self.this_is_the_title:
                # NOTE(review): 'and' here means the warning only fires for a
                # multi-child title whose first child is not Text; it looks
                # like 'or' was intended -- kept as-is, confirm upstream.
                if len(node.children) != 1 and not isinstance(node.children[0],
                                                              nodes.Text):
                    logger.warning(__('document title is not a single Text node'),
                                   location=node)
                if not self.elements['title']:
                    # text needs to be escaped since it is inserted into
                    # the output literally
                    self.elements['title'] = self.escape(node.astext())
                self.this_is_the_title = 0
                raise nodes.SkipNode
            else:
                short = ''
                if list(node.traverse(nodes.image)):
                    # provide a plain-text short title for running headers
                    short = ('[%s]' % self.escape(' '.join(clean_astext(node).split())))
                try:
                    self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
                except IndexError:
                    # deeper than the known levels: use the innermost command
                    self.body.append(r'\%s%s{' % (self.sectionnames[-1], short))
                self.context.append('}' + CR + self.hypertarget_to(node.parent))
        elif isinstance(parent, nodes.topic):
            self.body.append(r'\sphinxstyletopictitle{')
            self.context.append('}' + CR)
        elif isinstance(parent, nodes.sidebar):
            self.body.append(r'\sphinxstylesidebartitle{')
            self.context.append('}' + CR)
        elif isinstance(parent, nodes.Admonition):
            self.body.append('{')
            self.context.append('}' + CR)
        elif isinstance(parent, nodes.table):
            # collect the caption separately; depart_title stores it on Table
            self.pushbody([])
        else:
            logger.warning(__('encountered title node not in section, topic, table, '
                              'admonition or sidebar'),
                           location=node)
            self.body.append(r'\sphinxstyleothertitle{')
            self.context.append('}' + CR)
        self.in_title = 1
def depart_title(self, node: Element) -> None:
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
else:
self.body.append(self.context.pop())
def visit_subtitle(self, node: Element) -> None:
if isinstance(node.parent, nodes.sidebar):
self.body.append(r'\sphinxstylesidebarsubtitle{')
self.context.append('}' + CR)
else:
self.context.append('')
def depart_subtitle(self, node: Element) -> None:
self.body.append(self.context.pop())
raise UnsupportedError(
'%s:%s: longtable does not support nesting a table.' %
(self.curfilestack[-1], node.line or ''))
else:
self.table.has_problematic = True
elif len(self.tables) > 2:
raise UnsupportedError(
'%s:%s: deeply nested tables are not implemented.' %
(self.curfilestack[-1], node.line or ''))
self.tables.append(Table(node))
if self.next_table_colspec:
self.table.colspec = '{%s}' % self.next_table_colspec + CR
if 'colwidths-given' in node.get('classes', []):
logger.info(__('both tabularcolumns and :widths: option are given. '
':widths: is ignored.'), location=node)
self.next_table_colspec = None
def depart_table(self, node: Element) -> None:
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
dict(table=self.table, labels=labels))
self.body.append(BLANKLINE)
self.body.append(table)
self.body.append(CR)
self.tables.pop()
def visit_colspec(self, node: Element) -> None:
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
if 'stub' in node:
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node: Element) -> None:
pass
def visit_tgroup(self, node: Element) -> None:
pass
def depart_tgroup(self, node: Element) -> None:
pass
def visit_thead(self, node: Element) -> None:
self.pushbody(self.table.header)
def depart_thead(self, node: Element) -> None:
self.popbody()
def visit_tbody(self, node: Element) -> None:
self.pushbody(self.table.body)
def depart_tbody(self, node: Element) -> None:
self.popbody()
def visit_row(self, node: Element) -> None:
self.table.col = 0
while True:
cell = self.table.cell(self.table.row, self.table.col)
if cell is None:
break
else:
self.table.col += cell.width
if cell.col:
self.body.append('&')
if cell.width == 1:
self.body.append(r'\sphinxtablestrut{%d}' % cell.cell_id)
else:
self.body.append(r'\multicolumn{%d}{|l|}{\sphinxtablestrut{%d}}' %
(cell.width, cell.cell_id))
def depart_row(self, node: Element) -> None:
self.body.append(r'\\' + CR)
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
if all(underlined):
self.body.append(r'\hline')
else:
i = 0
underlined.extend([False])
while i < len(underlined):
if underlined[i] is True:
j = underlined[i:].index(False)
self.body.append(r'\cline{%d-%d}' % (i + 1, i + j))
i += j
i += 1
self.table.row += 1
def visit_entry(self, node: Element) -> None:
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
cell = self.table.cell()
context = ''
if cell.width > 1:
if self.config.latex_use_latex_multicolumn:
if self.table.col == 0:
self.body.append(r'\multicolumn{%d}{|l|}{%%' % cell.width + CR)
else:
self.body.append(r'\multicolumn{%d}{l|}{%%' % cell.width + CR)
context = '}%' + CR
else:
self.body.append(r'\sphinxstartmulticolumn{%d}%%' % cell.width + CR)
context = r'\sphinxstopmulticolumn' + CR
if cell.height > 1:
self.body.append(r'\sphinxmultirow{%d}{%d}{%%' % (cell.height, cell.cell_id) + CR)
context = '}%' + CR + context
if cell.width > 1 or cell.height > 1:
self.body.append(r'\begin{varwidth}[t]{\sphinxcolwidth{%d}{%d}}'
% (cell.width, self.table.colcount) + CR)
context = (r'\par' + CR + r'\vskip-\baselineskip'
r'\vbox{\hbox{\strut}}\end{varwidth}%' + CR + context)
self.needs_linetrimming = 1
if len(list(node.traverse(nodes.paragraph))) >= 2:
self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead) or (cell.col in self.table.stubs):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append(r'\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
def depart_entry(self, node: Element) -> None:
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
while body and body[0] == CR:
body.pop(0)
self.body.extend(body)
self.body.append(self.context.pop())
cell = self.table.cell()
self.table.col += cell.width
while True:
nextcell = self.table.cell()
if nextcell is None:
break
else:
self.table.col += nextcell.width
self.body.append('&')
if nextcell.width == 1:
self.body.append(r'\sphinxtablestrut{%d}' % nextcell.cell_id)
else:
self.body.append(r'\multicolumn{%d}{l|}{\sphinxtablestrut{%d}}' %
(nextcell.width, nextcell.cell_id))
def visit_acks(self, node: Element) -> None:
bullet_list = cast(nodes.bullet_list, node[0])
list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append(BLANKLINE)
self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append(BLANKLINE)
raise nodes.SkipNode
def visit_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append(r'\begin{itemize}' + CR)
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node: Element) -> None:
if not self.compact_list:
self.body.append(r'\end{itemize}' + CR)
def visit_enumerated_list(self, node: Element) -> None:
def get_enumtype(node: Element) -> str:
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
enumtype = 'arabic'
return enumtype
def get_nested_level(node: Element) -> int:
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
return get_nested_level(node.parent) + 1
else:
return get_nested_level(node.parent)
enum = "enum%s" % toRoman(get_nested_level(node)).lower()
enumnext = "enum%s" % toRoman(get_nested_level(node) + 1).lower()
style = ENUMERATE_LIST_STYLE.get(get_enumtype(node))
prefix = node.get('prefix', '')
suffix = node.get('suffix', '.')
self.body.append(r'\begin{enumerate}' + CR)
self.body.append(r'\sphinxsetlistlabels{%s}{%s}{%s}{%s}{%s}%%' %
(style, enum, enumnext, prefix, suffix) + CR)
if 'start' in node:
self.body.append(r'\setcounter{%s}{%d}' % (enum, node['start'] - 1) + CR)
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node: Element) -> None:
self.body.append(r'\end{enumerate}' + CR)
def visit_list_item(self, node: Element) -> None:
self.body.append(r'\item {} ')
def depart_list_item(self, node: Element) -> None:
self.body.append(CR)
def visit_definition_list(self, node: Element) -> None:
self.body.append(r'\begin{description}' + CR)
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node: Element) -> None:
self.body.append(r'\end{description}' + CR)
def visit_definition_list_item(self, node: Element) -> None:
pass
def depart_definition_list_item(self, node: Element) -> None:
pass
def visit_term(self, node: Element) -> None:
    """Start a description-list term via ``\\sphinxlineitem{``.

    The closing brace (plus any hyperlink anchors for the term's ids)
    is stashed on the context stack and emitted by :meth:`depart_term`.
    """
    self.in_term += 1
    targets = ''
    if node.get('ids'):
        # one anchor per id attached to this term
        targets = r'\phantomsection' + ''.join(
            self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
    self.context.append(targets + r'}')
    self.body.append(r'\sphinxlineitem{')

def depart_term(self, node: Element) -> None:
    """Close the term: pop the pending brace/anchor text."""
    tail = self.context.pop()
    self.body.append(tail)
    self.in_term -= 1
def visit_classifier(self, node: Element) -> None:
    """Open the literal bracket that sets off a term classifier."""
    self.body.append('{[}')

def depart_classifier(self, node: Element) -> None:
    """Close the classifier bracket."""
    self.body.append('{]}')
def visit_definition(self, node: Element) -> None:
    """The definition body needs no opening markup."""

def depart_definition(self, node: Element) -> None:
    """Terminate the definition with a newline."""
    self.body.append(CR)
def visit_field_list(self, node: Element) -> None:
    """Render a field list as a description list inside a quote."""
    self.body.append(r'\begin{quote}\begin{description}' + CR)
    if self.table:
        # nested environments do not work inside "tabulary" columns
        self.table.has_problematic = True

def depart_field_list(self, node: Element) -> None:
    """Close the description/quote pair opened by visit_field_list."""
    self.body.append(r'\end{description}\end{quote}' + CR)
def visit_field(self, node: Element) -> None:
    """Fields carry no markup of their own."""

def depart_field(self, node: Element) -> None:
    """Nothing to close."""

# field names/bodies render exactly like description-list terms/definitions
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node: Element) -> None:
    """Emit the paragraph lead-in, with compound/footnote special cases."""
    parent = node.parent
    idx = parent.index(node)
    follows_non_paragraph_in_compound = (
        idx > 0 and
        isinstance(parent, nodes.compound) and
        not isinstance(parent[idx - 1], (nodes.paragraph, nodes.compound)))
    if follows_non_paragraph_in_compound:
        # a paragraph after a non-paragraph sibling in a compound gets
        # \noindent instead of a blank line
        self.body.append(r'\noindent' + CR)
    elif idx == 1 and isinstance(parent, (nodes.footnote, footnotetext)):
        # second child of a footnote: no blank line wanted
        pass
    else:
        self.body.extend([CR, r'\sphinxAtStartPar' + CR])

def depart_paragraph(self, node: Element) -> None:
    self.body.append(CR)
def visit_centered(self, node: Element) -> None:
    """Open a center environment."""
    self.body.append(CR + r'\begin{center}')
    if self.table:
        # center environments do not work inside "tabulary" columns
        self.table.has_problematic = True

def depart_centered(self, node: Element) -> None:
    """Close the center environment."""
    self.body.append(CR + r'\end{center}')
def visit_hlist(self, node: Element) -> None:
    """Render an hlist as a compact itemize inside a multicols block."""
    self.compact_list += 1
    ncolumns = node['ncolumns']
    if self.compact_list > 1:
        # nested hlist: drop the extra vertical space around multicols
        self.body.append(r'\setlength{\multicolsep}{0pt}' + CR)
    self.body.extend([
        r'\begin{multicols}{' + ncolumns + r'}\raggedright' + CR,
        r'\begin{itemize}\setlength{\itemsep}{0pt}'
        r'\setlength{\parskip}{0pt}' + CR,
    ])
    if self.table:
        # multicols does not work inside "tabulary" columns
        self.table.has_problematic = True

def depart_hlist(self, node: Element) -> None:
    """Close the itemize/multicols pair opened by visit_hlist."""
    self.compact_list -= 1
    self.body.append(r'\end{itemize}\raggedcolumns\end{multicols}' + CR)

def visit_hlistcol(self, node: Element) -> None:
    """Column breaks are handled by multicols itself."""

def depart_hlistcol(self, node: Element) -> None:
    """Nothing to close."""
def latex_image_length(self, width_str: str, scale: int = 100) -> "Optional[str]":
    """Convert an RST dimension (e.g. ``3cm``, ``50%``) into a LaTeX length.

    Returns ``None`` (after logging a warning) when *width_str* uses a
    unit that cannot be translated, so callers must check the result
    before using it.  The previous ``-> str`` annotation was wrong about
    that; the annotation is quoted so it needs no runtime import.
    """
    try:
        return rstdim_to_latexdim(width_str, scale)
    except ValueError:
        logger.warning(__('dimension unit %s is invalid. Ignored.'), width_str)
        return None
def is_inline(self, node: Element) -> bool:
    """Return True when *node* sits in running text (parent is a TextElement)."""
    return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node: Element) -> None:
    """Emit ``\\sphinxincludegraphics`` for an image node.

    Collects size options (width/height/scale), alignment wrappers and
    parsed-literal protection, then writes the graphics command.  Inline
    and hyperlinked images skip the block-level \\noindent/newline wrap;
    remote (``://``) images produce no output.
    """
    pre: List[str] = []      # wrappers emitted before the graphics call
    post: List[str] = []     # matching closers, emitted after it
    include_graphics_options = []
    has_hyperlink = isinstance(node.parent, nodes.reference)
    if has_hyperlink:
        # the enclosing reference decides whether the image is inline
        is_inline = self.is_inline(node.parent)
    else:
        is_inline = self.is_inline(node)
    if 'width' in node:
        if 'scale' in node:
            w = self.latex_image_length(node['width'], node['scale'])
        else:
            w = self.latex_image_length(node['width'])
        if w:
            include_graphics_options.append('width=%s' % w)
    if 'height' in node:
        if 'scale' in node:
            h = self.latex_image_length(node['height'], node['scale'])
        else:
            h = self.latex_image_length(node['height'])
        if h:
            include_graphics_options.append('height=%s' % h)
    if 'scale' in node:
        if not include_graphics_options:
            # scale was not already folded into width/height above
            include_graphics_options.append('scale=%s'
                                            % (float(node['scale']) / 100.0))
    if 'align' in node:
        # (is_inline, align) -> (prefix, suffix) LaTeX wrappers
        align_prepost = {
            (1, 'top'): ('', ''),
            (1, 'middle'): (r'\raisebox{-0.5\height}{', '}'),
            (1, 'bottom'): (r'\raisebox{-\height}{', '}'),
            (0, 'center'): (r'{\hspace*{\fill}', r'\hspace*{\fill}}'),
            # be floated alongside the paragraph. See
            # https://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
            (0, 'left'): ('{', r'\hspace*{\fill}}'),
            (0, 'right'): (r'{\hspace*{\fill}', '}'),
        }
        try:
            pre.append(align_prepost[is_inline, node['align']][0])
            post.append(align_prepost[is_inline, node['align']][1])
        except KeyError:
            # unsupported inline/align combination: no wrapper
            pass
    if self.in_parsed_literal:
        # neutralise parsed-literal's active characters around the image
        pre.append(r'{\sphinxunactivateextrasandspace ')
        post.append('}')
    if not is_inline and not has_hyperlink:
        pre.append(CR + r'\noindent')
        post.append(CR)
    pre.reverse()
    if node['uri'] in self.builder.images:
        uri = self.builder.images[node['uri']]
    else:
        # missing image!
        if self.ignore_missing_images:
            return
        uri = node['uri']
    if uri.find('://') != -1:
        # ignore remote images
        return
    self.body.extend(pre)
    options = ''
    if include_graphics_options:
        options = '[%s]' % ','.join(include_graphics_options)
    base, ext = path.splitext(uri)
    if self.in_title and base:
        # Lowercase tokens forcely because some fncychap themes capitalize
        # the options of \sphinxincludegraphics unexpectedly (ex. WIDTH=...).
        self.body.append(r'\lowercase{\sphinxincludegraphics%s}{{%s}%s}' %
                         (options, base, ext))
    else:
        self.body.append(r'\sphinxincludegraphics%s{{%s}%s}' %
                         (options, base, ext))
    self.body.extend(post)

def depart_image(self, node: Element) -> None:
    # everything was emitted in visit_image
    pass
def visit_figure(self, node: Element) -> None:
    """Open the figure environment appropriate to the current context:
    a sphinxfigure-in-table cell, a wrapfigure (left/right aligned),
    a plain center block (inside a minipage), or a regular float.
    The matching \\end{...} is pushed onto the context stack."""
    align = self.elements['figure_align']
    if self.no_latex_floats:
        # inside admonitions floats are forbidden; pin with [H]
        align = "H"
    if self.table:
        # TODO: support align option
        if 'width' in node:
            length = self.latex_image_length(node['width'])
            if length:
                self.body.append(r'\begin{sphinxfigure-in-table}[%s]' % length + CR)
                self.body.append(r'\centering' + CR)
        else:
            self.body.append(r'\begin{sphinxfigure-in-table}' + CR)
            self.body.append(r'\centering' + CR)
        if any(isinstance(child, nodes.caption) for child in node):
            # allow hyperlinks to land on the caption
            self.body.append(r'\capstart')
        self.context.append(r'\end{sphinxfigure-in-table}\relax' + CR)
    elif node.get('align', '') in ('left', 'right'):
        length = None
        if 'width' in node:
            length = self.latex_image_length(node['width'])
        elif isinstance(node[0], nodes.image) and 'width' in node[0]:
            # fall back to the contained image's width
            length = self.latex_image_length(node[0]['width'])
        self.body.append(BLANKLINE)  # Insert a blank line to prevent infinite loop
        # https://github.com/sphinx-doc/sphinx/issues/7059
        self.body.append(r'\begin{wrapfigure}{%s}{%s}' %
                         ('r' if node['align'] == 'right' else 'l', length or '0pt') + CR)
        self.body.append(r'\centering')
        self.context.append(r'\end{wrapfigure}' + CR)
    elif self.in_minipage:
        self.body.append(CR + r'\begin{center}')
        self.context.append(r'\end{center}' + CR)
    else:
        self.body.append(CR + r'\begin{figure}[%s]' % align + CR)
        self.body.append(r'\centering' + CR)
        if any(isinstance(child, nodes.caption) for child in node):
            self.body.append(r'\capstart' + CR)
        self.context.append(r'\end{figure}' + CR)

def depart_figure(self, node: Element) -> None:
    self.body.append(self.context.pop())
def visit_caption(self, node: Element) -> None:
    """Open the caption macro appropriate to the parent container."""
    self.in_caption += 1
    parent = node.parent
    if isinstance(parent, captioned_literal_block):
        macro = r'\sphinxSetupCaptionForVerbatim{'
    elif self.in_minipage and isinstance(parent, nodes.figure):
        macro = r'\captionof{figure}{'
    elif self.table and parent.tagname == 'figure':
        macro = r'\sphinxfigcaption{'
    else:
        macro = r'\caption{'
    self.body.append(macro)

def depart_caption(self, node: Element) -> None:
    """Close the caption and, for figures, add hyperlink anchors."""
    self.body.append('}')
    if isinstance(node.parent, nodes.figure):
        self.body.append(self.hypertarget_to(node.parent))
    self.in_caption -= 1
def visit_legend(self, node: Element) -> None:
    """Open a sphinxlegend environment for a figure legend."""
    self.body.append(CR + r'\begin{sphinxlegend}')

def depart_legend(self, node: Element) -> None:
    """Close the sphinxlegend environment."""
    self.body.append(r'\end{sphinxlegend}' + CR)
def visit_admonition(self, node: Element) -> None:
    """Open a generic admonition box (styled like a note)."""
    self.body.append(CR + r'\begin{sphinxadmonition}{note}')
    # floats inside admonition boxes break LaTeX; force [H] placement
    self.no_latex_floats += 1

def depart_admonition(self, node: Element) -> None:
    self.body.append(r'\end{sphinxadmonition}' + CR)
    self.no_latex_floats -= 1

def _visit_named_admonition(self, node: Element) -> None:
    """Open an admonition box headed by its translated label."""
    heading = admonitionlabels[node.tagname]
    self.body.append(CR + r'\begin{sphinxadmonition}{%s}{%s:}' %
                     (node.tagname, heading))
    self.no_latex_floats += 1

def _depart_named_admonition(self, node: Element) -> None:
    self.body.append(r'\end{sphinxadmonition}' + CR)
    self.no_latex_floats -= 1
# all named admonitions share the generic named-admonition handlers above
visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node: Element) -> None:
    """versionadded/versionchanged/deprecated need no extra markup."""

def depart_versionmodified(self, node: Element) -> None:
    """Nothing to close."""
def visit_target(self, node: Element) -> None:
    """Emit hypertargets for a target node, unless the next visitor or
    the node's own attributes make the anchor redundant."""
    def add_target(id: str) -> None:
        # indexing uses standard LaTeX index markup, so the targets
        # will be generated differently
        if id.startswith('index-'):
            return
        # equations also need no extra blank line nor hypertarget
        # TODO: fix this dependency on mathbase extension internals
        if id.startswith('equation-'):
            return
        # insert blank line, if the target follows a paragraph node
        index = node.parent.index(node)
        if index > 0 and isinstance(node.parent[index - 1], nodes.paragraph):
            self.body.append(CR)
        # do not generate \phantomsection in \section{}
        anchor = not self.in_title
        self.body.append(self.hypertarget(id, anchor=anchor))

    # skip if visitor for next node supports hyperlink
    next_node: Node = node
    while isinstance(next_node, nodes.target):
        next_node = next_node.next_node(ascend=True)

    domain = cast(StandardDomain, self.builder.env.get_domain('std'))
    if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
        return
    elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
        # numbered, captioned nodes carry their own anchors
        return

    if 'refuri' in node:
        return
    if 'anonymous' in node:
        return
    if node.get('refid'):
        prev_node = get_prev_node(node)
        if isinstance(prev_node, nodes.reference) and node['refid'] == prev_node['refid']:
            # a target for a hyperlink reference having alias
            pass
        else:
            add_target(node['refid'])
    for id in node['ids']:
        add_target(id)

def depart_target(self, node: Element) -> None:
    pass
def visit_attribution(self, node: Element) -> None:
    """Right-align the attribution, prefixed with an em-dash sequence."""
    self.body.append(CR + r'\begin{flushright}' + CR)
    self.body.append('---')

def depart_attribution(self, node: Element) -> None:
    """Close the flushright environment."""
    self.body.append(CR + r'\end{flushright}' + CR)
def visit_index(self, node: Element) -> None:
    """Translate index entries into ``\\index{...}`` commands.

    Handles the single/pair/triple/see/seealso entry types and raises
    SkipNode, since index nodes have no visible output of their own.
    """
    def escape(value: str) -> str:
        # escape for LaTeX, then for makeindex's special characters
        value = self.encode(value)
        value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
        value = value.replace(r'\}', r'\sphinxrightcurlybrace{}')
        value = value.replace('"', '""')
        value = value.replace('@', '"@')
        value = value.replace('!', '"!')
        value = value.replace('|', r'\textbar{}')
        return value

    def style(string: str) -> str:
        # wrap "entry (extra)" as \spxentry{...}\spxextra{...}
        match = EXTRA_RE.match(string)
        if match:
            return match.expand(r'\\spxentry{\1}\\spxextra{\2}')
        else:
            return r'\spxentry{%s}' % string

    if not node.get('inline', True):
        self.body.append(CR)
    entries = node['entries']
    for type, string, tid, ismain, key_ in entries:
        m = ''
        if ismain:
            # bold page number for the "main" occurrence
            m = '|spxpagem'
        try:
            if type == 'single':
                try:
                    p1, p2 = [escape(x) for x in split_into(2, 'single', string)]
                    P1, P2 = style(p1), style(p2)
                    self.body.append(r'\index{%s@%s!%s@%s%s}' % (p1, P1, p2, P2, m))
                except ValueError:
                    # no subentry present
                    p = escape(split_into(1, 'single', string)[0])
                    P = style(p)
                    self.body.append(r'\index{%s@%s%s}' % (p, P, m))
            elif type == 'pair':
                p1, p2 = [escape(x) for x in split_into(2, 'pair', string)]
                P1, P2 = style(p1), style(p2)
                self.body.append(r'\index{%s@%s!%s@%s%s}\index{%s@%s!%s@%s%s}' %
                                 (p1, P1, p2, P2, m, p2, P2, p1, P1, m))
            elif type == 'triple':
                p1, p2, p3 = [escape(x) for x in split_into(3, 'triple', string)]
                P1, P2, P3 = style(p1), style(p2), style(p3)
                self.body.append(
                    r'\index{%s@%s!%s %s@%s %s%s}'
                    r'\index{%s@%s!%s, %s@%s, %s%s}'
                    r'\index{%s@%s!%s %s@%s %s%s}' %
                    (p1, P1, p2, p3, P2, P3, m,
                     p2, P2, p3, p1, P3, P1, m,
                     p3, P3, p1, p2, P1, P2, m))
            elif type == 'see':
                p1, p2 = [escape(x) for x in split_into(2, 'see', string)]
                P1 = style(p1)
                self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
            elif type == 'seealso':
                p1, p2 = [escape(x) for x in split_into(2, 'seealso', string)]
                P1 = style(p1)
                self.body.append(r'\index{%s@%s|see{%s}}' % (p1, P1, p2))
            else:
                logger.warning(__('unknown index entry type %s found'), type)
        except ValueError as err:
            logger.warning(str(err))
    if not node.get('inline', True):
        self.body.append(r'\ignorespaces ')
    raise nodes.SkipNode
def visit_raw(self, node: Element) -> None:
    """Pass raw content through when its format list includes 'latex'."""
    inline = self.is_inline(node)
    if not inline:
        self.body.append(CR)
    if 'latex' in node.get('format', '').split():
        self.body.append(node.astext())
    if not inline:
        self.body.append(CR)
    raise nodes.SkipNode
def visit_reference(self, node: Element) -> None:
    """Open a hyperlink for a resolved reference.

    Intra-document references become \\hyperref-style crossrefs (with an
    optional page number), inter-document '%...' URIs are mapped to
    labels, and external URIs become \\sphinxhref/\\sphinxurl.  The text
    needed to close the construct is pushed onto the context stack.
    """
    if not self.in_title:
        for id in node.get('ids'):
            anchor = not self.in_caption
            self.body += self.hypertarget(id, anchor=anchor)
    if not self.is_inline(node):
        self.body.append(CR)
    uri = node.get('refuri', '')
    if not uri and node.get('refid'):
        # local refid: encode as an intra-project '%doc#label' URI
        uri = '%' + self.curfilestack[-1] + '#' + node['refid']
    if self.in_title or not uri:
        self.context.append('')
    elif uri.startswith('#'):
        # references to labels in the same document
        id = self.curfilestack[-1] + ':' + uri[1:]
        self.body.append(self.hyperlink(id))
        self.body.append(r'\emph{')
        if self.config.latex_show_pagerefs and not \
                self.in_production_list:
            self.context.append('}}} (%s)' % self.hyperpageref(id))
        else:
            self.context.append('}}}')
    elif uri.startswith('%'):
        # references to documents or labels inside documents
        hashindex = uri.find('#')
        if hashindex == -1:
            # reference to the document
            id = uri[1:] + '::doc'
        else:
            # reference to a label
            id = uri[1:].replace('#', ':')
        self.body.append(self.hyperlink(id))
        if (len(node) and
                isinstance(node[0], nodes.Element) and
                'std-term' in node[0].get('classes', [])):
            # don't add a pageref for glossary terms
            self.context.append('}}}')
            # mark up as termreference
            self.body.append(r'\sphinxtermref{')
        else:
            self.body.append(r'\sphinxcrossref{')
            if self.config.latex_show_pagerefs and not self.in_production_list:
                self.context.append('}}} (%s)' % self.hyperpageref(id))
            else:
                self.context.append('}}}')
    else:
        # external URI
        if len(node) == 1 and uri == node[0]:
            # bare URL: emit it directly and skip the (identical) child text
            if node.get('nolinkurl'):
                self.body.append(r'\sphinxnolinkurl{%s}' % self.encode_uri(uri))
            else:
                self.body.append(r'\sphinxurl{%s}' % self.encode_uri(uri))
            raise nodes.SkipNode
        else:
            self.body.append(r'\sphinxhref{%s}{' % self.encode_uri(uri))
            self.context.append('}')

def depart_reference(self, node: Element) -> None:
    self.body.append(self.context.pop())
    if not self.is_inline(node):
        self.body.append(CR)
def visit_number_reference(self, node: Element) -> None:
    """Render a numbered (numfig) cross-reference as a \\hyperref link."""
    if node.get('refid'):
        id = self.curfilestack[-1] + ':' + node['refid']
    else:
        id = node.get('refuri', '')[1:].replace('#', ':')
    # the title template arrives LaTeX-escaped; restore the %s placeholder
    title = self.escape(node.get('title', '%s')).replace(r'\%s', '%s')
    if r'\{name\}' in title or r'\{number\}' in title:
        # new style format (cf. "Fig.{number}")
        title = title.replace(r'\{name\}', '{name}').replace(r'\{number\}', '{number}')
        text = escape_abbr(title).format(name=r'\nameref{%s}' % self.idescape(id),
                                         number=r'\ref{%s}' % self.idescape(id))
    else:
        # old style format (cf. "Fig.%s")
        text = escape_abbr(title) % (r'\ref{%s}' % self.idescape(id))
    hyperref = r'\hyperref[%s]{%s}' % (self.idescape(id), text)
    self.body.append(hyperref)
    raise nodes.SkipNode
def visit_download_reference(self, node: Element) -> None:
    """Download references render as their child text only."""

def depart_download_reference(self, node: Element) -> None:
    """Nothing to close."""

def visit_pending_xref(self, node: Element) -> None:
    """Unresolved cross-references render as their child text only."""

def depart_pending_xref(self, node: Element) -> None:
    """Nothing to close."""
def visit_emphasis(self, node: Element) -> None:
    """Wrap emphasised text in \\sphinxstyleemphasis."""
    self.body.append(r'\sphinxstyleemphasis{')

def depart_emphasis(self, node: Element) -> None:
    self.body.append('}')

def visit_literal_emphasis(self, node: Element) -> None:
    """Wrap emphasised literals in the literal-emphasis/upquote macros."""
    self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')

def depart_literal_emphasis(self, node: Element) -> None:
    self.body.append('}}')

def visit_strong(self, node: Element) -> None:
    """Wrap strong text in \\sphinxstylestrong."""
    self.body.append(r'\sphinxstylestrong{')

def depart_strong(self, node: Element) -> None:
    self.body.append('}')

def visit_literal_strong(self, node: Element) -> None:
    """Wrap strong literals in the literal-strong/upquote macros."""
    self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')

def depart_literal_strong(self, node: Element) -> None:
    self.body.append('}}')
def visit_abbreviation(self, node: Element) -> None:
    """Style the abbreviation; spell out its explanation only once."""
    abbr = node.astext()
    self.body.append(r'\sphinxstyleabbreviation{')
    if node.hasattr('explanation') and abbr not in self.handled_abbrs:
        # first occurrence: append "(explanation)" after the abbreviation
        self.handled_abbrs.add(abbr)
        closer = '} (%s)' % self.encode(node['explanation'])
    else:
        closer = '}'
    self.context.append(closer)

def depart_abbreviation(self, node: Element) -> None:
    self.body.append(self.context.pop())
def visit_manpage(self, node: Element) -> None:
    """Manual-page references render like emphasised literals."""
    return self.visit_literal_emphasis(node)

def depart_manpage(self, node: Element) -> None:
    """Mirror the literal-emphasis closer."""
    return self.depart_literal_emphasis(node)
def visit_title_reference(self, node: Element) -> None:
    """Wrap cited titles in \\sphinxtitleref."""
    self.body.append(r'\sphinxtitleref{')

def depart_title_reference(self, node: Element) -> None:
    self.body.append('}')
def visit_thebibliography(self, node: Element) -> None:
    """Open a sphinxthebibliography environment.

    The widest citation label is passed as the environment argument so
    LaTeX can size the label column; excessively long labels are capped
    so they cannot break the layout.
    """
    citations = cast(Iterable[nodes.citation], node)
    label_texts = [cast(nodes.label, citation[0]).astext()
                   for citation in citations]
    widest = max(label_texts, key=len)
    # cap the label width (no-op when already short enough)
    widest = widest[:MAX_CITATION_LABEL_LENGTH]
    self.body.append(CR + r'\begin{sphinxthebibliography}{%s}' %
                     self.encode(widest) + CR)

def depart_thebibliography(self, node: Element) -> None:
    """Close the sphinxthebibliography environment."""
    self.body.append(r'\end{sphinxthebibliography}' + CR)
def visit_citation(self, node: Element) -> None:
    """Emit \\bibitem with the encoded label and a docname-scoped key."""
    label_text = cast(nodes.label, node[0]).astext()
    self.body.append(r'\bibitem[%s]{%s:%s}' % (self.encode(label_text),
                                               node['docname'], node['ids'][0]))

def depart_citation(self, node: Element) -> None:
    """Nothing to close; the children follow the \\bibitem."""
def visit_citation_reference(self, node: Element) -> None:
    """Emit \\sphinxcite; in titles, fall through so the reference text
    is rendered plainly instead."""
    if self.in_title:
        return
    self.body.append(r'\sphinxcite{%s:%s}' % (node['docname'], node['refname']))
    raise nodes.SkipNode

def depart_citation_reference(self, node: Element) -> None:
    """Only reached in titles (SkipNode is raised otherwise)."""
def visit_literal(self, node: Element) -> None:
    """Open the code/keyboard/title-literal macro for inline literals."""
    if self.in_title:
        opener = r'\sphinxstyleliteralintitle{\sphinxupquote{'
    elif 'kbd' in node['classes']:
        opener = r'\sphinxkeyboard{\sphinxupquote{'
    else:
        opener = r'\sphinxcode{\sphinxupquote{'
    self.body.append(opener)

def depart_literal(self, node: Element) -> None:
    self.body.append('}}')
def visit_footnote_reference(self, node: Element) -> None:
    """Footnote references are emitted via footnotemark nodes instead."""
    raise nodes.SkipNode

def visit_footnotemark(self, node: Element) -> None:
    """Open \\sphinxfootnotemark[; the number follows as child text."""
    self.body.append(r'\sphinxfootnotemark[')

def depart_footnotemark(self, node: Element) -> None:
    self.body.append(']')
def visit_footnotetext(self, node: Element) -> None:
    """Open a footnotetext environment labelled like its footnote mark."""
    mark = cast(nodes.label, node[0]).astext()
    self.body.append('%' + CR)
    self.body.append(r'\begin{footnotetext}[%s]'
                     r'\phantomsection\label{\thesphinxscope.%s}%%'
                     % (mark, mark) + CR)
    self.body.append(r'\sphinxAtStartFootnote' + CR)

def depart_footnotetext(self, node: Element) -> None:
    # the \ignorespaces in particular for after table header use
    self.body.append('%' + CR)
    self.body.append(r'\end{footnotetext}\ignorespaces ')

def visit_captioned_literal_block(self, node: Element) -> None:
    """Container only; the caption and literal_block children do the work."""

def depart_captioned_literal_block(self, node: Element) -> None:
    """Nothing to emit."""
def visit_literal_block(self, node: Element) -> None:
    """Emit a highlighted code block (sphinxVerbatim[intable]) or, for
    parsed-literal content, open a sphinxalltt environment and let the
    children render; highlighted blocks raise SkipNode."""
    if node.rawsource != node.astext():
        # most probably a parsed-literal block -- don't highlight
        self.in_parsed_literal += 1
        self.body.append(r'\begin{sphinxalltt}' + CR)
    else:
        labels = self.hypertarget_to(node)
        if isinstance(node.parent, captioned_literal_block):
            labels += self.hypertarget_to(node.parent)
        if labels and not self.in_footnote:
            self.body.append(CR + r'\def\sphinxLiteralBlockLabel{' + labels + '}')

        lang = node.get('language', 'default')
        linenos = node.get('linenos', False)
        highlight_args = node.get('highlight_args', {})
        highlight_args['force'] = node.get('force', False)
        opts = self.config.highlight_options.get(lang, {})

        hlcode = self.highlighter.highlight_block(
            node.rawsource, lang, opts=opts, linenos=linenos,
            location=node, **highlight_args
        )
        if self.in_footnote:
            self.body.append(CR + r'\sphinxSetupCodeBlockInFootnote')
            hlcode = hlcode.replace(r'\begin{Verbatim}',
                                    r'\begin{sphinxVerbatim}')
        # if in table raise verbatim flag to avoid "tabulary" environment
        # and opt for sphinxVerbatimintable to handle caption & long lines
        elif self.table:
            self.table.has_problematic = True
            self.table.has_verbatim = True
            hlcode = hlcode.replace(r'\begin{Verbatim}',
                                    r'\begin{sphinxVerbatimintable}')
        else:
            hlcode = hlcode.replace(r'\begin{Verbatim}',
                                    r'\begin{sphinxVerbatim}')
        # get consistent trailer
        hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim} (14 chars)
        if self.table and not self.in_footnote:
            hlcode += r'\end{sphinxVerbatimintable}'
        else:
            hlcode += r'\end{sphinxVerbatim}'

        # emphasized line numbers, e.g. ":emphasize-lines: 2,4"
        hllines = str(highlight_args.get('hl_lines', []))[1:-1]
        if hllines:
            self.body.append(CR + r'\fvset{hllines={, %s,}}%%' % hllines)
        self.body.append(CR + hlcode + CR)
        if hllines:
            self.body.append(r'\sphinxresetverbatimhllines' + CR)
        raise nodes.SkipNode

def depart_literal_block(self, node: Element) -> None:
    # only reached for parsed-literal blocks; highlighted blocks raised
    # SkipNode in visit_literal_block
    self.body.append(CR + r'\end{sphinxalltt}' + CR)
    self.in_parsed_literal -= 1

# doctest blocks are rendered exactly like literal blocks
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node: Element) -> None:
    """Each line of a line block is an unlabeled item."""
    self.body.append(r'\item[] ')

def depart_line(self, node: Element) -> None:
    self.body.append(CR)

def visit_line_block(self, node: Element) -> None:
    """Open a DUlineblock; nested blocks are indented via DUlineblockindent."""
    nested = isinstance(node.parent, nodes.line_block)
    if nested:
        self.body.append(r'\item[]' + CR)
        self.body.append(r'\begin{DUlineblock}{\DUlineblockindent}' + CR)
    else:
        self.body.append(CR + r'\begin{DUlineblock}{0em}' + CR)
    if self.table:
        # DUlineblock does not work inside "tabulary" columns
        self.table.has_problematic = True

def depart_line_block(self, node: Element) -> None:
    self.body.append(r'\end{DUlineblock}' + CR)
def _is_bare_list_quote(self, node: Element) -> bool:
    """True when the quote's single child is itself a list.

    Such quotes are rendered as the bare list, which yields the desired
    indentation without nesting a quote environment.
    """
    return (len(node.children) == 1 and
            isinstance(node.children[0],
                       (nodes.bullet_list, nodes.enumerated_list)))

def visit_block_quote(self, node: Element) -> None:
    """Open a quote environment, except around a lone nested list."""
    if not self._is_bare_list_quote(node):
        self.body.append(r'\begin{quote}' + CR)
        if self.table:
            # quote environments do not work inside "tabulary" columns
            self.table.has_problematic = True

def depart_block_quote(self, node: Element) -> None:
    """Close the quote environment when one was opened."""
    if not self._is_bare_list_quote(node):
        self.body.append(r'\end{quote}' + CR)
# option node handling copied from docutils' latex writer
def visit_option(self, node: Element) -> None:
    """Separate options within a group with commas."""
    if self.context[-1]:
        # this is not the first option of the group
        self.body.append(', ')

def depart_option(self, node: Element) -> None:
    # bump the counter so the next option gets a comma
    self.context[-1] += 1

def visit_option_argument(self, node: Element) -> None:
    """Write the delimiter between an option and its argument."""
    self.body.append(node.get('delimiter', ' '))

def depart_option_argument(self, node: Element) -> None:
    """Nothing to close."""

def visit_option_group(self, node: Element) -> None:
    self.body.append(r'\item [')
    # counter of options written so far in this group
    self.context.append(0)

def depart_option_group(self, node: Element) -> None:
    self.context.pop()  # the option counter
    self.body.append('] ')

def visit_option_list(self, node: Element) -> None:
    self.body.append(r'\begin{optionlist}{3cm}' + CR)
    if self.table:
        # optionlist does not work inside "tabulary" columns
        self.table.has_problematic = True

def depart_option_list(self, node: Element) -> None:
    self.body.append(r'\end{optionlist}' + CR)

def visit_option_list_item(self, node: Element) -> None:
    """Nothing to open."""

def depart_option_list_item(self, node: Element) -> None:
    """Nothing to close."""

def visit_option_string(self, node: Element) -> None:
    """Write the encoded option string and skip its children."""
    self.body.append(self.encode(node.astext()))
    raise nodes.SkipNode
def visit_description(self, node: Element) -> None:
    """Separate an option group from its description with a space."""
    self.body.append(' ')

def depart_description(self, node: Element) -> None:
    """Nothing to close."""

def visit_superscript(self, node: Element) -> None:
    """Typeset superscripts as math-mode text."""
    self.body.append(r'$^{\text{')

def depart_superscript(self, node: Element) -> None:
    self.body.append('}}$')

def visit_subscript(self, node: Element) -> None:
    """Typeset subscripts as math-mode text."""
    self.body.append(r'$_{\text{')

def depart_subscript(self, node: Element) -> None:
    self.body.append('}}$')
def visit_inline(self, node: Element) -> None:
    """Wrap known roles in their sphinx macros, others in \\DUrole."""
    classes = node.get('classes', [])
    simple = {
        ('menuselection',): r'\sphinxmenuselection{',
        ('guilabel',): r'\sphinxguilabel{',
        ('accelerator',): r'\sphinxaccelerator{',
    }
    macro = simple.get(tuple(classes))
    if macro is not None:
        self.body.append(macro)
        self.context.append('}')
    elif classes and not self.in_title:
        # generic docutils role(s)
        self.body.append(r'\DUrole{%s}{' % ','.join(classes))
        self.context.append('}')
    else:
        self.context.append('')

def depart_inline(self, node: Element) -> None:
    self.body.append(self.context.pop())
def visit_generated(self, node: Element) -> None:
    """Generated text needs no extra markup."""

def depart_generated(self, node: Element) -> None:
    """Nothing to close."""

def visit_compound(self, node: Element) -> None:
    """Compound paragraphs need no extra markup."""

def depart_compound(self, node: Element) -> None:
    """Nothing to close."""

def visit_container(self, node: Element) -> None:
    """Open one sphinxuseclass environment per CSS class."""
    for cls in node.get('classes', []):
        self.body.append('\n\\begin{sphinxuseclass}{%s}' % cls)

def depart_container(self, node: Element) -> None:
    """Close the environments opened by visit_container (one per class)."""
    for _ in node.get('classes', []):
        self.body.append('\n\\end{sphinxuseclass}')

def visit_decoration(self, node: Element) -> None:
    """Headers/footers are handled elsewhere."""

def depart_decoration(self, node: Element) -> None:
    """Nothing to close."""
# docutils-generated elements that we don't support
def visit_header(self, node: Element) -> None:
    """Page headers are unsupported in LaTeX output."""
    raise nodes.SkipNode

def visit_footer(self, node: Element) -> None:
    """Page footers are unsupported in LaTeX output."""
    raise nodes.SkipNode

def visit_docinfo(self, node: Element) -> None:
    """Bibliographic docinfo fields are unsupported in LaTeX output."""
    raise nodes.SkipNode
# text handling
def encode(self, text: str) -> str:
    """LaTeX-escape *text*; in literal-whitespace mode also harden spaces."""
    escaped = self.escape(text)
    if not self.literal_whitespace:
        return escaped
    # keep newlines/spaces literal; the ~ before \\ avoids
    # "! LaTeX Error: There's no line here to end."
    return escaped.replace(CR, r'~\\' + CR).replace(' ', '~')
def encode_uri(self, text: str) -> str:
    """Escape *text* for use inside \\href/\\url arguments.

    TODO: it is probably wrong that this uses texescape.escape();
    this must be checked against hyperref package exact dealings.
    Mainly %, #, {, } and \\ need escaping via a \\ escape; in \\href,
    the tilde is allowed and must be represented literally.
    """
    escaped = self.encode(text)
    for src, dst in ((r'\textasciitilde{}', '~'),
                     (r'\sphinxhyphen{}', '-'),
                     (r'\textquotesingle{}', "'")):
        escaped = escaped.replace(src, dst)
    return escaped
def visit_Text(self, node: Text) -> None:
    """Write escaped text content."""
    self.body.append(self.encode(node.astext()))

def depart_Text(self, node: Text) -> None:
    """Nothing to do."""
def visit_comment(self, node: Element) -> None:
    """Comments produce no output."""
    raise nodes.SkipNode

def visit_meta(self, node: Element) -> None:
    # only valid for HTML
    raise nodes.SkipNode

def visit_system_message(self, node: Element) -> None:
    """Render the message text inline; depart adds the trailing newline."""

def depart_system_message(self, node: Element) -> None:
    self.body.append(CR)
def visit_math(self, node: Element) -> None:
    """Emit inline math; in titles, \\protect the delimiters."""
    if self.in_title:
        self.body.append(r'\protect\(%s\protect\)' % node.astext())
    else:
        self.body.append(r'\(%s\)' % node.astext())
    raise nodes.SkipNode
def visit_math_block(self, node: Element) -> None:
    """Emit display math, wrapped/numbered unless ``nowrap`` is set."""
    if node.get('label'):
        label = "equation:%s:%s" % (node['docname'], node['label'])
    else:
        label = None
    if node.get('nowrap'):
        # the author supplied a complete LaTeX environment already
        if label:
            self.body.append(r'\label{%s}' % label)
        self.body.append(node.astext())
    else:
        from sphinx.util.math import wrap_displaymath
        self.body.append(wrap_displaymath(node.astext(), label,
                                          self.config.math_number_all))
    raise nodes.SkipNode
def visit_math_reference(self, node: Element) -> None:
    """Emit an equation reference, honouring math_eqref_format."""
    label = "equation:%s:%s" % (node['docname'], node['target'])
    fmt = self.config.math_eqref_format
    if not fmt:
        self.body.append(r'\eqref{%s}' % label)
        return
    try:
        self.body.append(fmt.format(number=r'\ref{%s}' % label))
    except KeyError as exc:
        # bad placeholder in the user-provided format string
        logger.warning(__('Invalid math_eqref_format: %r'), exc,
                       location=node)
        self.body.append(r'\eqref{%s}' % label)

def depart_math_reference(self, node: Element) -> None:
    pass
def unknown_visit(self, node: Node) -> None:
    """Fail loudly on node types this translator does not handle."""
    raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
@property
def docclasses(self) -> Tuple[str, str]:
    """Deprecated shim: the supported LaTeX document classes.

    Emits a deprecation warning and returns the fixed pair
    ('howto', 'manual'); scheduled for removal in Sphinx 7.0.
    """
    warnings.warn('LaTeXWriter.docclasses() is deprecated.',
                  RemovedInSphinx70Warning, stacklevel=2)
    return ('howto', 'manual')
# FIXME: Workaround to avoid circular import
# refs: https://github.com/sphinx-doc/sphinx/issues/5433
from sphinx.builders.latex.nodes import ( # NOQA isort:skip
HYPERLINK_SUPPORT_NODES, captioned_literal_block, footnotetext,
)
| true | true |
f7338bd5fc8ce08c189fae86311d7c3e1e17a4f7 | 215 | py | Python | muse_score_pdf_exporter.py | kwitee/MuseScoreAutoExporter | d1d3050b73787a8ae2a26b4969480cbcf60abfa1 | [
"MIT"
] | null | null | null | muse_score_pdf_exporter.py | kwitee/MuseScoreAutoExporter | d1d3050b73787a8ae2a26b4969480cbcf60abfa1 | [
"MIT"
] | null | null | null | muse_score_pdf_exporter.py | kwitee/MuseScoreAutoExporter | d1d3050b73787a8ae2a26b4969480cbcf60abfa1 | [
"MIT"
] | null | null | null | import sys
from common import *
def main(muse_score_path, directory_path):
muse_score_export(muse_score_path, directory_path, OutputFormat.pdf)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
| 17.916667 | 72 | 0.744186 | import sys
from common import *
def main(muse_score_path, directory_path):
muse_score_export(muse_score_path, directory_path, OutputFormat.pdf)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
| true | true |
f7338c79c2a36b79835f2421455db0c575892ca0 | 395 | py | Python | docs/components_page/components/spinner/simple.py | glsdown/dash-bootstrap-components | 0ebea4f7de43975f6e3a2958359c4480ae1d4927 | [
"Apache-2.0"
] | 776 | 2019-02-07T19:36:59.000Z | 2022-03-31T05:53:04.000Z | docs/components_page/components/spinner/simple.py | glsdown/dash-bootstrap-components | 0ebea4f7de43975f6e3a2958359c4480ae1d4927 | [
"Apache-2.0"
] | 350 | 2019-02-05T10:42:19.000Z | 2022-03-31T19:23:35.000Z | docs/components_page/components/spinner/simple.py | glsdown/dash-bootstrap-components | 0ebea4f7de43975f6e3a2958359c4480ae1d4927 | [
"Apache-2.0"
] | 219 | 2019-02-10T13:46:25.000Z | 2022-03-23T17:03:39.000Z | import dash_bootstrap_components as dbc
from dash import html
spinners = html.Div(
[
dbc.Spinner(color="primary"),
dbc.Spinner(color="secondary"),
dbc.Spinner(color="success"),
dbc.Spinner(color="warning"),
dbc.Spinner(color="danger"),
dbc.Spinner(color="info"),
dbc.Spinner(color="light"),
dbc.Spinner(color="dark"),
]
)
| 24.6875 | 39 | 0.602532 | import dash_bootstrap_components as dbc
from dash import html
spinners = html.Div(
[
dbc.Spinner(color="primary"),
dbc.Spinner(color="secondary"),
dbc.Spinner(color="success"),
dbc.Spinner(color="warning"),
dbc.Spinner(color="danger"),
dbc.Spinner(color="info"),
dbc.Spinner(color="light"),
dbc.Spinner(color="dark"),
]
)
| true | true |
f7338cfd399d2b0c39454b622b16d13949a6c4b0 | 4,484 | py | Python | src/s2_put_skeleton_txts_to_a_single_txt.py | SilviaVec/Realtime-Action-Recognition | 330a64fc1b2158b1884a1ee86b9cc875925fc121 | [
"MIT"
] | null | null | null | src/s2_put_skeleton_txts_to_a_single_txt.py | SilviaVec/Realtime-Action-Recognition | 330a64fc1b2158b1884a1ee86b9cc875925fc121 | [
"MIT"
] | 3 | 2020-06-08T14:22:36.000Z | 2020-06-08T14:27:52.000Z | src/s2_put_skeleton_txts_to_a_single_txt.py | mmlab-cv/Realtime-Action-Recognition | 330a64fc1b2158b1884a1ee86b9cc875925fc121 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
'''
Read multiple skeletons txts and saved them into a single txt.
If an image doesn't have skeleton, discard it.
If an image label is not `CLASSES`, discard it.
Input:
`skeletons/00001.txt` ~ `skeletons/xxxxx.txt` from `SRC_DETECTED_SKELETONS_FOLDER`.
Output:
`skeletons_info.txt`. The filepath is `DST_ALL_SKELETONS_TXT`.
'''
import numpy as np
import simplejson
import collections
if True: # Include project path
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.append(ROOT)
# import utils.lib_feature_proc # This is no needed,
# because this script only transfer (part of) the data from many txts to a single txt,
# without doing any data analsysis.
import utils.lib_commons as lib_commons
def par(path):
    """Resolve *path* against the project ROOT unless it is already absolute.

    Empty/None paths and paths starting with '/' are returned unchanged.
    """
    if path and path[0] != "/":
        return ROOT + path
    return path
# -- Settings
# All paths/labels come from config/config.yaml; `par` resolves relative
# paths against the project ROOT.
cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s2_put_skeleton_txts_to_a_single_txt.py"]  # this script's section
CLASSES = np.array(cfg_all["classes"])  # valid action labels
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]
SRC_DETECTED_SKELETONS_FOLDER = par(cfg["input"]["detected_skeletons_folder"])
DST_ALL_SKELETONS_TXT = par(cfg["output"]["all_skeletons_txt"])
IDX_PERSON = 0  # Only use the skeleton of the 0th person in each image
IDX_ACTION_LABEL = 3  # e.g. [1, 7, 54, "jump", "jump_03-02-12-34-01-795/00240.jpg"]
# -- Helper function
def read_skeletons_from_ith_txt(i):
    """Load the skeleton rows stored in the i-th skeleton txt file.

    Arguments:
        i {int}: zero-based index of the skeleton txt file.
    Returns:
        list of list: one row per detected person in that image; each row is
        supposed to hold 56 values (5 image-info fields + 51 xyz positions).
    """
    txt_path = SRC_DETECTED_SKELETONS_FOLDER + SKELETON_FILENAME_FORMAT.format(i)
    return lib_commons.read_listlist(txt_path)
def get_length_of_one_skeleton_data(filepaths):
    """Return the per-skeleton data length by probing the first non-empty txt.

    The expected length is 59:
        5 image-info fields ([cnt_action, cnt_clip, cnt_image, action_label,
        filepath] -- see utils.lib_io.get_training_imgs_info)
      + 54 values (18 joints * 3 xyz positions).

    Raises:
        RuntimeError: if every txt file under the source folder is empty.
    """
    for txt_index in range(len(filepaths)):
        people = read_skeletons_from_ith_txt(txt_index)
        if not people:
            continue
        row_length = len(people[IDX_PERSON])
        assert(row_length == 59)  # MODIFIED: project-specific fixed row length
        return row_length
    raise RuntimeError(f"No valid txt under: {SRC_DETECTED_SKELETONS_FOLDER}.")
# -- Main
if __name__ == "__main__":
    ''' Read multiple skeletons txts and saved them into a single txt. '''

    # -- Get skeleton filenames
    filepaths = lib_commons.get_filenames(SRC_DETECTED_SKELETONS_FOLDER,
                                          use_sort=True, with_folder_path=True)
    num_skeletons = len(filepaths)

    # -- Check data length of one skeleton
    data_length = get_length_of_one_skeleton_data(filepaths)
    # BUG FIX: the original string lacked the `f` prefix, so the literal
    # text "{data_length}" was printed instead of the measured value.
    print(f"Data length of one skeleton is {data_length}")

    # -- Read in skeletons and push to all_skeletons
    all_skeletons = []
    labels_cnt = collections.defaultdict(int)  # per-action-label counter
    for i in range(num_skeletons):

        # Read all people's skeletons from the i-th txt.
        skeletons = read_skeletons_from_ith_txt(i)
        if not skeletons:  # If empty, discard this image.
            continue
        skeleton = skeletons[IDX_PERSON]
        label = skeleton[IDX_ACTION_LABEL]
        if label not in CLASSES:  # If invalid label, discard this image.
            continue
        labels_cnt[label] += 1

        # Push to result
        all_skeletons.append(skeleton)

        # Progress report every 100 files.
        if i == 1 or i % 100 == 0:
            print("{}/{}".format(i, num_skeletons))

    # -- Save all kept rows as a single JSON list-of-lists.
    with open(DST_ALL_SKELETONS_TXT, 'w') as f:
        simplejson.dump(all_skeletons, f)

    print(f"There are {len(all_skeletons)} skeleton data.")
    print(f"They are saved to {DST_ALL_SKELETONS_TXT}")
    print("Number of each action: ")
    for label in CLASSES:
        print(f"    {label}: {labels_cnt[label]}")
| 34.229008 | 97 | 0.662801 |
import numpy as np
import simplejson
import collections
if True:
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.append(ROOT)
tils.lib_commons as lib_commons
def par(path):
return ROOT + path if (path and path[0] != "/") else path
# -- Settings
cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s2_put_skeleton_txts_to_a_single_txt.py"]
CLASSES = np.array(cfg_all["classes"])
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]
SRC_DETECTED_SKELETONS_FOLDER = par(cfg["input"]["detected_skeletons_folder"])
DST_ALL_SKELETONS_TXT = par(cfg["output"]["all_skeletons_txt"])
IDX_PERSON = 0 # Only use the skeleton of the 0th person in each image
IDX_ACTION_LABEL = 3 # [1, 7, 54, "jump", "jump_03-02-12-34-01-795/00240.jpg"]
# -- Helper function
def read_skeletons_from_ith_txt(i):
filename = SRC_DETECTED_SKELETONS_FOLDER + \
SKELETON_FILENAME_FORMAT.format(i)
skeletons_in_ith_txt = lib_commons.read_listlist(filename)
return skeletons_in_ith_txt
def get_length_of_one_skeleton_data(filepaths):
for i in range(len(filepaths)):
skeletons = read_skeletons_from_ith_txt(i)
if len(skeletons):
skeleton = skeletons[IDX_PERSON]
data_size = len(skeleton)
assert(data_size == 59) #MODIFIED
return data_size
raise RuntimeError(f"No valid txt under: {SRC_DETECTED_SKELETONS_FOLDER}.")
# -- Main
if __name__ == "__main__":
# -- Get skeleton filenames
filepaths = lib_commons.get_filenames(SRC_DETECTED_SKELETONS_FOLDER,
use_sort=True, with_folder_path=True)
num_skeletons = len(filepaths)
# -- Check data length of one skeleton
data_length = get_length_of_one_skeleton_data(filepaths)
print("Data length of one skeleton is {data_length}")
# -- Read in skeletons and push to all_skeletons
all_skeletons = []
labels_cnt = collections.defaultdict(int)
for i in range(num_skeletons):
# Read skeletons from a txt
skeletons = read_skeletons_from_ith_txt(i)
if not skeletons: # If empty, discard this image.
continue
skeleton = skeletons[IDX_PERSON]
label = skeleton[IDX_ACTION_LABEL]
if label not in CLASSES: # If invalid label, discard this image.
continue
labels_cnt[label] += 1
# Push to result
all_skeletons.append(skeleton)
# Print
if i == 1 or i % 100 == 0:
print("{}/{}".format(i, num_skeletons))
# -- Save to txt
with open(DST_ALL_SKELETONS_TXT, 'w') as f:
simplejson.dump(all_skeletons, f)
print(f"There are {len(all_skeletons)} skeleton data.")
print(f"They are saved to {DST_ALL_SKELETONS_TXT}")
print("Number of each action: ")
for label in CLASSES:
print(f" {label}: {labels_cnt[label]}")
| true | true |
f7338d56e91bbfd73a238452f2b8f6fba056c9ac | 190 | py | Python | _celery/Celery/demo/celery_app/task2.py | yc19890920/ap | 5df907afdeeea06befbb29c11f2bab8ff06efb16 | [
"Apache-2.0"
] | 1 | 2021-01-11T06:30:44.000Z | 2021-01-11T06:30:44.000Z | _celery/Celery/demo/celery_app/task2.py | yc19890920/ap | 5df907afdeeea06befbb29c11f2bab8ff06efb16 | [
"Apache-2.0"
] | 23 | 2020-02-12T02:35:49.000Z | 2022-02-11T03:45:40.000Z | _celery/Celery/demo/celery_app/task2.py | yc19890920/ap | 5df907afdeeea06befbb29c11f2bab8ff06efb16 | [
"Apache-2.0"
] | 2 | 2020-04-08T15:39:46.000Z | 2020-10-10T10:13:09.000Z | # -*- coding: utf-8 -*-
import time
from celery_app import app
# BUG FIX: the original stacked a bare `@app.task` on top of
# `@app.task(queue=...)`. The outer decorator then wraps the already-created
# Task object in a *second* task (double registration); a single decorator
# carrying the queue option is the correct form.
# NOTE(review): queue name spells "celey" — looks like a typo for "celery",
# but it is a runtime routing key, so it is left unchanged; confirm against
# the worker's queue configuration before renaming.
@app.task(queue='test_celey_queue_multiply')
def multiply(x, y):
    """Celery task: return the product of x and y.

    Routed to the 'test_celey_queue_multiply' queue.
    """
    # time.sleep(0.02)
    return x * y
| 17.272727 | 45 | 0.631579 |
import time
from celery_app import app
@app.task
@app.task(queue='test_celey_queue_multiply')
def multiply(x, y):
return x * y
| true | true |
f7338eebb28d83e5ed91ee4013c8eac11bcbfae5 | 352 | py | Python | utils.py | schorrm/arm2riscv | 5fa28e28d920705b660874a03b9906fae710b442 | [
"MIT"
] | 8 | 2020-07-07T13:08:26.000Z | 2022-03-29T23:12:37.000Z | utils.py | schorrm/arm2riscv | 5fa28e28d920705b660874a03b9906fae710b442 | [
"MIT"
] | 2 | 2020-04-05T07:17:22.000Z | 2021-06-27T22:33:25.000Z | utils.py | schorrm/arm2riscv | 5fa28e28d920705b660874a03b9906fae710b442 | [
"MIT"
] | 1 | 2021-06-19T12:38:45.000Z | 2021-06-19T12:38:45.000Z | #!/usr/bin/python3
class InstructionNotRecognized(Exception):
    """Raised when an instruction has no defined conversion code."""
# Assembly prologue emitted into translated output: a thread-local
# (.tdata) bank of eight zero-initialized 64-bit slots labeled REG_BANK.
# NOTE: this is a runtime string — its exact whitespace is significant to
# the emitted assembly and must not be reformatted.
reg_labels = """ .section .tdata
REG_BANK:
	.dword 0
	.dword 0
	.dword 0
	.dword 0
	.dword 0
	.dword 0
	.dword 0
	.dword 0
"""
class InstructionNotRecognized(Exception):
pass
reg_labels = """ .section .tdata
REG_BANK:
.dword 0
.dword 0
.dword 0
.dword 0
.dword 0
.dword 0
.dword 0
.dword 0
""" | true | true |
f7338f6dd3f181e895e19eec66ca21d59cbbdafa | 14,786 | py | Python | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 6 | 2021-07-05T16:09:39.000Z | 2022-03-06T22:44:42.000Z | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | Source/JavaScriptCore/inspector/scripts/codegen/cpp_generator.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2014-2018 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os.path
import re
try:
from .generator import ucfirst, Generator
from .models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
except ValueError:
from generator import ucfirst, Generator
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
# Protocol primitive-type name -> C++/JSON type spelling used in generated code.
_PRIMITIVE_TO_CPP_NAME_MAP = {
    'boolean': 'bool',
    'integer': 'int',
    'number': 'double',
    'string': 'String',
    'object': 'JSON::Object',
    'array': 'JSON::Array',
    'any': 'JSON::Value'
}
class CppGenerator(Generator):
    """Shared helpers for the C++ protocol code generators.

    Maps protocol model types (PrimitiveType, ObjectType, ArrayType,
    EnumType, AliasedType) onto the C++ type spellings used by the
    generated backend dispatchers and protocol object builders.  The
    many `cpp_type_for_*` variants differ in ownership/reference style
    (RefPtr, const&, pointer, Optional) depending on where the value
    appears: in/out parameter, async callback, stack variable, or member.
    """
    def __init__(self, *args, **kwargs):
        Generator.__init__(self, *args, **kwargs)
    def protocol_name(self):
        # Framework-specific protocol group name; empty string when unset.
        return self.model().framework.setting('cpp_protocol_group', '')
    def helpers_namespace(self):
        return '%sHelpers' % self.protocol_name()
    # Miscellaneous text manipulation routines.
    @staticmethod
    def cpp_getter_method_for_type(_type):
        # Name of the JSON getter method used to read a value of _type.
        # Aliases and enums recurse into their underlying primitive type.
        if isinstance(_type, ObjectType):
            return 'getObject'
        if isinstance(_type, ArrayType):
            return 'getArray'
        if isinstance(_type, PrimitiveType):
            if _type.raw_name() == 'integer':
                return 'getInteger'
            elif _type.raw_name() == 'number':
                return 'getDouble'
            elif _type.raw_name() == 'any':
                return 'getValue'
            else:
                return 'get' + ucfirst(_type.raw_name())
        if isinstance(_type, AliasedType):
            return CppGenerator.cpp_getter_method_for_type(_type.aliased_type)
        if isinstance(_type, EnumType):
            return CppGenerator.cpp_getter_method_for_type(_type.primitive_type)
    @staticmethod
    def cpp_setter_method_for_type(_type):
        # Mirror of cpp_getter_method_for_type for the JSON setter methods.
        if isinstance(_type, ObjectType):
            return 'setObject'
        if isinstance(_type, ArrayType):
            return 'setArray'
        if isinstance(_type, PrimitiveType):
            if _type.raw_name() == 'integer':
                return 'setInteger'
            elif _type.raw_name() == 'number':
                return 'setDouble'
            elif _type.raw_name() == 'any':
                return 'setValue'
            else:
                return 'set' + ucfirst(_type.raw_name())
        if isinstance(_type, AliasedType):
            return CppGenerator.cpp_setter_method_for_type(_type.aliased_type)
        if isinstance(_type, EnumType):
            return CppGenerator.cpp_setter_method_for_type(_type.primitive_type)
    # Generate type representations for various situations.
    @staticmethod
    def cpp_protocol_type_for_type(_type):
        # Canonical builder-type spelling for a protocol type.
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type  # Fall through to enum or primitive.
        if isinstance(_type, ObjectType) and len(_type.members) == 0:
            return 'JSON::Object'
        if isinstance(_type, ArrayType):
            if _type.raw_name() is None:  # Otherwise, fall through and use typedef'd name.
                return 'JSON::ArrayOf<%s>' % CppGenerator.cpp_protocol_type_for_type(_type.element_type)
        if isinstance(_type, (ObjectType, EnumType, ArrayType)):
            return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
        if isinstance(_type, PrimitiveType):
            return CppGenerator.cpp_name_for_primitive_type(_type)
    @staticmethod
    def cpp_protocol_type_for_type_member(type_member, object_declaration):
        # Anonymous enums are scoped inside their declaring object's type.
        if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
            return '::'.join([CppGenerator.cpp_protocol_type_for_type(object_declaration.type), ucfirst(type_member.member_name)])
        else:
            return CppGenerator.cpp_protocol_type_for_type(type_member.type)
    @staticmethod
    def cpp_type_for_unchecked_formal_in_parameter(parameter):
        # Incoming ("unchecked") dispatcher parameter: raw JSON types,
        # pointers when optional, const references otherwise.
        _type = parameter.type
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type  # Fall through to enum or primitive.
        if isinstance(_type, EnumType):
            _type = _type.primitive_type  # Fall through to primitive.
        # This handles the 'any' type and objects with defined properties.
        if isinstance(_type, ObjectType) or _type.qualified_name() == 'object':
            cpp_name = 'JSON::Object'
            if parameter.is_optional:
                return 'const %s*' % cpp_name
            else:
                return 'const %s&' % cpp_name
        if isinstance(_type, ArrayType):
            cpp_name = 'JSON::Array'
            if parameter.is_optional:
                return 'const %s*' % cpp_name
            else:
                return 'const %s&' % cpp_name
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if parameter.is_optional:
                return 'const %s*' % cpp_name
            elif _type.raw_name() in ['string']:
                return 'const %s&' % cpp_name
            else:
                return cpp_name
        return "unknown_unchecked_formal_in_parameter_type"
    @staticmethod
    def cpp_type_for_checked_formal_event_parameter(parameter):
        return CppGenerator.cpp_type_for_type_with_name(parameter.type, parameter.parameter_name, parameter.is_optional)
    @staticmethod
    def cpp_type_for_type_member(member):
        return CppGenerator.cpp_type_for_type_with_name(member.type, member.member_name, False)
    @staticmethod
    def cpp_type_for_type_with_name(_type, type_name, is_optional):
        # Builder-side spelling; `type_name` is only used to name anonymous enums.
        if isinstance(_type, (ArrayType, ObjectType)):
            return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
        if isinstance(_type, AliasedType):
            builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
            if is_optional:
                return 'const %s*' % builder_type
            elif _type.aliased_type.qualified_name() in ['integer', 'number']:
                return CppGenerator.cpp_name_for_primitive_type(_type.aliased_type)
            elif _type.aliased_type.qualified_name() in ['string']:
                return 'const %s&' % builder_type
            else:
                return builder_type
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if _type.qualified_name() in ['object']:
                return 'RefPtr<JSON::Object>'
            elif _type.qualified_name() in ['any']:
                return 'RefPtr<JSON::Value>'
            elif is_optional:
                return 'const %s*' % cpp_name
            elif _type.qualified_name() in ['string']:
                return 'const %s&' % cpp_name
            else:
                return cpp_name
        if isinstance(_type, EnumType):
            if _type.is_anonymous:
                enum_type_name = ucfirst(type_name)
            else:
                enum_type_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
            if is_optional:
                return '%s*' % enum_type_name
            else:
                return '%s' % enum_type_name
    @staticmethod
    def cpp_type_for_formal_out_parameter(parameter):
        # Out-parameter spelling: references/pointers the handler writes into.
        _type = parameter.type
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type  # Fall through.
        if isinstance(_type, (ObjectType, ArrayType)):
            return 'RefPtr<%s>&' % CppGenerator.cpp_protocol_type_for_type(_type)
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if parameter.is_optional:
                return "Optional<%s>&" % cpp_name
            else:
                return '%s*' % cpp_name
        if isinstance(_type, EnumType):
            if _type.is_anonymous:
                return '%sBackendDispatcherHandler::%s*' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
            else:
                return 'Inspector::Protocol::%s::%s*' % (_type.type_domain().domain_name, _type.raw_name())
        raise ValueError("unknown formal out parameter type.")
    # FIXME: this is only slightly different from out parameters; they could be unified.
    @staticmethod
    def cpp_type_for_formal_async_parameter(parameter):
        # Parameter spelling for async-command callback signatures.
        _type = parameter.type
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type  # Fall through.
        if isinstance(_type, (ObjectType, ArrayType)):
            return 'RefPtr<%s>&&' % CppGenerator.cpp_protocol_type_for_type(_type)
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if parameter.is_optional:
                return "Optional<%s>&" % cpp_name
            elif _type.qualified_name() in ['integer', 'number']:
                return CppGenerator.cpp_name_for_primitive_type(_type)
            elif _type.qualified_name() in ['string']:
                return 'const %s&' % cpp_name
            else:
                return cpp_name
        if isinstance(_type, EnumType):
            if _type.is_anonymous:
                cpp_name = '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
            else:
                cpp_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
            if parameter.is_optional:
                return "Optional<%s>" % cpp_name
            else:
                return cpp_name
        raise ValueError("Unknown formal async parameter type.")
    # In-parameters don't use builder types, because they could be passed
    # "open types" that are manually constructed out of InspectorObjects.
    # FIXME: Only parameters that are actually open types should need non-builder parameter types.
    @staticmethod
    def cpp_type_for_stack_in_parameter(parameter):
        _type = parameter.type
        if isinstance(_type, AliasedType):
            _type = _type.aliased_type  # Fall through.
        if isinstance(_type, EnumType):
            _type = _type.primitive_type  # Fall through.
        if isinstance(_type, ObjectType):
            return "RefPtr<JSON::Object>"
        if isinstance(_type, ArrayType):
            return "RefPtr<JSON::Array>"
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if _type.qualified_name() in ['any', 'object']:
                return "RefPtr<%s>" % CppGenerator.cpp_name_for_primitive_type(_type)
            elif parameter.is_optional and _type.qualified_name() not in ['boolean', 'string', 'integer', 'number']:
                return "Optional<%s>" % cpp_name
            else:
                return cpp_name
    @staticmethod
    def cpp_type_for_stack_out_parameter(parameter):
        # Local-variable spelling used before handing the value back out.
        _type = parameter.type
        if isinstance(_type, (ArrayType, ObjectType)):
            return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
        if isinstance(_type, AliasedType):
            builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
            if parameter.is_optional:
                return "Optional<%s>" % builder_type
            return '%s' % builder_type
        if isinstance(_type, PrimitiveType):
            cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
            if parameter.is_optional:
                return "Optional<%s>" % cpp_name
            else:
                return cpp_name
        if isinstance(_type, EnumType):
            if _type.is_anonymous:
                return '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
            else:
                return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
    @staticmethod
    def cpp_assertion_method_for_type_member(type_member, object_declaration):
        # Name of the BindingTraits assertion for validating a member's value.
        def assertion_method_for_type(_type):
            return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type(_type)
        if isinstance(type_member.type, AliasedType):
            return assertion_method_for_type(type_member.type.aliased_type)
        if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
            return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type_member(type_member, object_declaration)
        return assertion_method_for_type(type_member.type)
    @staticmethod
    def cpp_name_for_primitive_type(_type):
        # Returns None for names missing from the map (dict.get default).
        return _PRIMITIVE_TO_CPP_NAME_MAP.get(_type.raw_name())
    # Decide whether certain helpers are necessary in a situation.
    @staticmethod
    def should_use_wrapper_for_return_type(_type):
        return not isinstance(_type, (ArrayType, ObjectType))
    @staticmethod
    def should_use_references_for_type(_type):
        return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() in ["any", "object"])
    @staticmethod
    def should_pass_by_copy_for_return_type(_type):
        return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() == "object")
| 44.269461 | 148 | 0.653524 |
import logging
import os.path
import re
try:
from .generator import ucfirst, Generator
from .models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
except ValueError:
from generator import ucfirst, Generator
from models import PrimitiveType, ObjectType, ArrayType, EnumType, AliasedType, Frameworks
log = logging.getLogger('global')
_PRIMITIVE_TO_CPP_NAME_MAP = {
'boolean': 'bool',
'integer': 'int',
'number': 'double',
'string': 'String',
'object': 'JSON::Object',
'array': 'JSON::Array',
'any': 'JSON::Value'
}
class CppGenerator(Generator):
def __init__(self, *args, **kwargs):
Generator.__init__(self, *args, **kwargs)
def protocol_name(self):
return self.model().framework.setting('cpp_protocol_group', '')
def helpers_namespace(self):
return '%sHelpers' % self.protocol_name()
@staticmethod
def cpp_getter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'getObject'
if isinstance(_type, ArrayType):
return 'getArray'
if isinstance(_type, PrimitiveType):
if _type.raw_name() == 'integer':
return 'getInteger'
elif _type.raw_name() == 'number':
return 'getDouble'
elif _type.raw_name() == 'any':
return 'getValue'
else:
return 'get' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_getter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_getter_method_for_type(_type.primitive_type)
@staticmethod
def cpp_setter_method_for_type(_type):
if isinstance(_type, ObjectType):
return 'setObject'
if isinstance(_type, ArrayType):
return 'setArray'
if isinstance(_type, PrimitiveType):
if _type.raw_name() == 'integer':
return 'setInteger'
elif _type.raw_name() == 'number':
return 'setDouble'
elif _type.raw_name() == 'any':
return 'setValue'
else:
return 'set' + ucfirst(_type.raw_name())
if isinstance(_type, AliasedType):
return CppGenerator.cpp_setter_method_for_type(_type.aliased_type)
if isinstance(_type, EnumType):
return CppGenerator.cpp_setter_method_for_type(_type.primitive_type)
@staticmethod
def cpp_protocol_type_for_type(_type):
if isinstance(_type, AliasedType):
_type = _type.aliased_type
if isinstance(_type, ObjectType) and len(_type.members) == 0:
return 'JSON::Object'
if isinstance(_type, ArrayType):
if _type.raw_name() is None:
return 'JSON::ArrayOf<%s>' % CppGenerator.cpp_protocol_type_for_type(_type.element_type)
if isinstance(_type, (ObjectType, EnumType, ArrayType)):
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if isinstance(_type, PrimitiveType):
return CppGenerator.cpp_name_for_primitive_type(_type)
@staticmethod
def cpp_protocol_type_for_type_member(type_member, object_declaration):
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return '::'.join([CppGenerator.cpp_protocol_type_for_type(object_declaration.type), ucfirst(type_member.member_name)])
else:
return CppGenerator.cpp_protocol_type_for_type(type_member.type)
@staticmethod
def cpp_type_for_unchecked_formal_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through to enum or primitive.
if isinstance(_type, EnumType):
_type = _type.primitive_type # Fall through to primitive.
# This handles the 'any' type and objects with defined properties.
if isinstance(_type, ObjectType) or _type.qualified_name() == 'object':
cpp_name = 'JSON::Object'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, ArrayType):
cpp_name = 'JSON::Array'
if parameter.is_optional:
return 'const %s*' % cpp_name
else:
return 'const %s&' % cpp_name
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return 'const %s*' % cpp_name
elif _type.raw_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
return "unknown_unchecked_formal_in_parameter_type"
@staticmethod
def cpp_type_for_checked_formal_event_parameter(parameter):
return CppGenerator.cpp_type_for_type_with_name(parameter.type, parameter.parameter_name, parameter.is_optional)
@staticmethod
def cpp_type_for_type_member(member):
return CppGenerator.cpp_type_for_type_with_name(member.type, member.member_name, False)
@staticmethod
def cpp_type_for_type_with_name(_type, type_name, is_optional):
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if is_optional:
return 'const %s*' % builder_type
elif _type.aliased_type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type.aliased_type)
elif _type.aliased_type.qualified_name() in ['string']:
return 'const %s&' % builder_type
else:
return builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['object']:
return 'RefPtr<JSON::Object>'
elif _type.qualified_name() in ['any']:
return 'RefPtr<JSON::Value>'
elif is_optional:
return 'const %s*' % cpp_name
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
enum_type_name = ucfirst(type_name)
else:
enum_type_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if is_optional:
return '%s*' % enum_type_name
else:
return '%s' % enum_type_name
@staticmethod
def cpp_type_for_formal_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>&" % cpp_name
else:
return '%s*' % cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s*' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s*' % (_type.type_domain().domain_name, _type.raw_name())
raise ValueError("unknown formal out parameter type.")
# FIXME: this is only slightly different from out parameters; they could be unified.
@staticmethod
def cpp_type_for_formal_async_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type # Fall through.
if isinstance(_type, (ObjectType, ArrayType)):
return 'RefPtr<%s>&&' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>&" % cpp_name
elif _type.qualified_name() in ['integer', 'number']:
return CppGenerator.cpp_name_for_primitive_type(_type)
elif _type.qualified_name() in ['string']:
return 'const %s&' % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
cpp_name = '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
cpp_name = 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
if parameter.is_optional:
return "Optional<%s>" % cpp_name
else:
return cpp_name
raise ValueError("Unknown formal async parameter type.")
# In-parameters don't use builder types, because they could be passed
@staticmethod
def cpp_type_for_stack_in_parameter(parameter):
_type = parameter.type
if isinstance(_type, AliasedType):
_type = _type.aliased_type
if isinstance(_type, EnumType):
_type = _type.primitive_type
if isinstance(_type, ObjectType):
return "RefPtr<JSON::Object>"
if isinstance(_type, ArrayType):
return "RefPtr<JSON::Array>"
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if _type.qualified_name() in ['any', 'object']:
return "RefPtr<%s>" % CppGenerator.cpp_name_for_primitive_type(_type)
elif parameter.is_optional and _type.qualified_name() not in ['boolean', 'string', 'integer', 'number']:
return "Optional<%s>" % cpp_name
else:
return cpp_name
@staticmethod
def cpp_type_for_stack_out_parameter(parameter):
_type = parameter.type
if isinstance(_type, (ArrayType, ObjectType)):
return 'RefPtr<%s>' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(_type, AliasedType):
builder_type = CppGenerator.cpp_protocol_type_for_type(_type)
if parameter.is_optional:
return "Optional<%s>" % builder_type
return '%s' % builder_type
if isinstance(_type, PrimitiveType):
cpp_name = CppGenerator.cpp_name_for_primitive_type(_type)
if parameter.is_optional:
return "Optional<%s>" % cpp_name
else:
return cpp_name
if isinstance(_type, EnumType):
if _type.is_anonymous:
return '%sBackendDispatcherHandler::%s' % (_type.type_domain().domain_name, ucfirst(parameter.parameter_name))
else:
return 'Inspector::Protocol::%s::%s' % (_type.type_domain().domain_name, _type.raw_name())
@staticmethod
def cpp_assertion_method_for_type_member(type_member, object_declaration):
def assertion_method_for_type(_type):
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type(_type)
if isinstance(type_member.type, AliasedType):
return assertion_method_for_type(type_member.type.aliased_type)
if isinstance(type_member.type, EnumType) and type_member.type.is_anonymous:
return 'BindingTraits<%s>::assertValueHasExpectedType' % CppGenerator.cpp_protocol_type_for_type_member(type_member, object_declaration)
return assertion_method_for_type(type_member.type)
@staticmethod
def cpp_name_for_primitive_type(_type):
return _PRIMITIVE_TO_CPP_NAME_MAP.get(_type.raw_name())
@staticmethod
def should_use_wrapper_for_return_type(_type):
return not isinstance(_type, (ArrayType, ObjectType))
@staticmethod
def should_use_references_for_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() in ["any", "object"])
@staticmethod
def should_pass_by_copy_for_return_type(_type):
return isinstance(_type, (ArrayType, ObjectType)) or (isinstance(_type, (PrimitiveType)) and _type.qualified_name() == "object")
| true | true |
f73390ff913ad0d3db1ad7b68b7cc2ba3cb10194 | 3,815 | py | Python | keepercommander/custom/send_breachwatch_reminder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | [
"MIT"
] | null | null | null | keepercommander/custom/send_breachwatch_reminder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | [
"MIT"
] | null | null | null | keepercommander/custom/send_breachwatch_reminder.py | Keeper-Security/commander | 93fee5d2ba56f2288e00ab33003597d00a302b5c | [
"MIT"
] | null | null | null | # _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: commander@keepersecurity.com
#
# Example script to run a BreachWatch status report, parse the results,
# and send users an email reminder to address their found issues.
#
# Note: SMTP credentials must be supplied via a vault record
# in order to send the email.
#
# This example also pulls configuration
# from config.json or writes the config file if it does not exist.
#
# Usage:
# python send_breachwatch_reminder.py
import base64
import getpass
import json
import os
import ssl
from smtplib import SMTP
from keepercommander import api, vault_extensions, vault
from keepercommander.commands.enterprise import SecurityAuditReportCommand
from keepercommander.params import KeeperParams
# Reminder message sent to at-risk users, formatted with the sender address.
# Fixed: smtplib.sendmail() transmits this text verbatim, so the RFC 822
# headers must come first.  The original string began with a newline (which
# terminated the empty header block, pushing From:/Subject: into the body)
# and had no blank line separating the Subject header from the body text.
email_message = '''From: {0}
Subject: Keeper BreachWatch Alert

BreachWatch detected records at risk in your vault.
Please login to Keeper and review the records marked "At Risk".
'''
def read_config_file(params):
    """Load settings from config.json (located next to this script) into *params*.

    Recognized keys are user, password, mfa_token, server, and device_id
    (base64url-encoded).  A missing file or missing keys are silently skipped;
    params.config_filename is always set.
    """
    params.config_filename = os.path.join(os.path.dirname(__file__), 'config.json')
    if not os.path.isfile(params.config_filename):
        return
    with open(params.config_filename, 'r') as config_file:
        params.config = json.load(config_file)
    # Copy the simple string settings straight onto the params object.
    for key in ('user', 'password', 'mfa_token', 'server'):
        if key in params.config:
            setattr(params, key, params.config[key])
    if 'device_id' in params.config:
        # Pad the base64url value so the decoder accepts any stored length.
        decoded = base64.urlsafe_b64decode(params.config['device_id'] + '==')
        params.rest_context.device_id = decoded
# --- Gather credentials and run the BreachWatch security audit report ---
my_params = KeeperParams()
read_config_file(my_params)

# Prompt for any credentials that config.json did not provide.
# NOTE(review): getpass is also used for the (non-secret) email prompt to
# match the original behavior; input() would echo it instead.
while not my_params.user:
    my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
    my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)

# Run the BreachWatch-enabled security audit report and parse its JSON output.
report_command = SecurityAuditReportCommand()
report_json = report_command.execute(my_params, breachwatch=True, format='json')
report = json.loads(report_json)

# Collect email addresses of users with more than 5 at-risk records.
# Fixed: 'at_risk' may be absent from a row; default to 0 so the comparison
# cannot raise TypeError (None > 5) on Python 3.
emails = [x['email'] for x in report if x.get('at_risk', 0) > 5]
if emails:
    # At least one user exceeded the at-risk threshold: look up SMTP
    # credentials in the vault and send each of them a reminder email.
    api.sync_down(my_params)
    # SMTP settings live in a vault record of type 'serverCredentials'
    # whose contents match the search string 'smtp'; take the first match.
    smtp_record = next(vault_extensions.find_records(my_params, search_str='smtp', record_type='serverCredentials'), None)
    if isinstance(smtp_record, vault.TypedRecord):
        smtp_host = None
        smtp_port = 0          # 0 lets smtplib choose the default port
        username = None
        password = None
        # Extract the host name and optional port from the 'host' field.
        field = smtp_record.get_typed_field('host')
        if field:
            host_value = field.get_default_value()
            if isinstance(host_value, dict):
                smtp_host = host_value.get('hostName')
                port = host_value.get('port')
                if port:
                    try:
                        smtp_port = int(port)
                    except ValueError:
                        pass   # malformed port: keep the default
        if smtp_host:
            # Only read login credentials once a host is known.
            field = smtp_record.get_typed_field('login')
            if field:
                username = field.get_default_value()
            field = smtp_record.get_typed_field('password')
            if field:
                password = field.get_default_value()
        if smtp_host:
            with SMTP(host=smtp_host, port=smtp_port) as connection:
                if username:
                    # Upgrade the connection to TLS before authenticating.
                    connection.starttls(context=ssl.create_default_context())
                    connection.login(user=username, password=password)
                connection.sendmail(my_params.user, emails, email_message.format(my_params.user))
| 34.0625 | 122 | 0.647182 |
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: commander@keepersecurity.com
#
# Example script to run a BreachWatch status report, parse the results,
# and send users an email reminder to address their found issues.
#
# Note: SMTP credentials must be supplied via a vault record
# in order to send the email.
#
# This example also pulls configuration
# from config.json or writes the config file if it does not exist.
#
# Usage:
# python send_breachwatch_reminder.py
import base64
import getpass
import json
import os
import ssl
from smtplib import SMTP
from keepercommander import api, vault_extensions, vault
from keepercommander.commands.enterprise import SecurityAuditReportCommand
from keepercommander.params import KeeperParams
email_message = '''
From: {0}
Subject: Keeper BreachWatch Alert
BreachWatch detected records at risk in your vault.
Please login to Keeper and review the records marked "At Risk".
'''
def read_config_file(params):
params.config_filename = os.path.join(os.path.dirname(__file__), 'config.json')
if os.path.isfile(params.config_filename):
with open(params.config_filename, 'r') as f:
params.config = json.load(f)
if 'user' in params.config:
params.user = params.config['user']
if 'password' in params.config:
params.password = params.config['password']
if 'mfa_token' in params.config:
params.mfa_token = params.config['mfa_token']
if 'server' in params.config:
params.server = params.config['server']
if 'device_id' in params.config:
device_id = base64.urlsafe_b64decode(params.config['device_id'] + '==')
params.rest_context.device_id = device_id
my_params = KeeperParams()
read_config_file(my_params)
while not my_params.user:
my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)
report_command = SecurityAuditReportCommand()
report_json = report_command.execute(my_params, breachwatch=True, format='json')
report = json.loads(report_json)
emails = [x['email'] for x in report if x.get('at_risk') > 5]
if emails:
api.sync_down(my_params)
smtp_record = next(vault_extensions.find_records(my_params, search_str='smtp', record_type='serverCredentials'), None)
if isinstance(smtp_record, vault.TypedRecord):
smtp_host = None
smtp_port = 0
username = None
password = None
field = smtp_record.get_typed_field('host')
if field:
host_value = field.get_default_value()
if isinstance(host_value, dict):
smtp_host = host_value.get('hostName')
port = host_value.get('port')
if port:
try:
smtp_port = int(port)
except ValueError:
pass
if smtp_host:
field = smtp_record.get_typed_field('login')
if field:
username = field.get_default_value()
field = smtp_record.get_typed_field('password')
if field:
password = field.get_default_value()
if smtp_host:
with SMTP(host=smtp_host, port=smtp_port) as connection:
if username:
connection.starttls(context=ssl.create_default_context())
connection.login(user=username, password=password)
connection.sendmail(my_params.user, emails, email_message.format(my_params.user))
| true | true |
f73391199401e76d26dbccadc07847533bdcd32e | 2,448 | py | Python | wandb/vendor/graphql-core-1.1/graphql/pyutils/version.py | theodumont/client | 7402ac67ada5bc8078078a49fd3e0cb4b6172307 | [
"MIT"
] | 3,968 | 2017-08-23T21:27:19.000Z | 2022-03-31T22:00:19.000Z | wandb/vendor/graphql-core-1.1/graphql/pyutils/version.py | theodumont/client | 7402ac67ada5bc8078078a49fd3e0cb4b6172307 | [
"MIT"
] | 2,725 | 2017-04-17T00:29:15.000Z | 2022-03-31T21:01:53.000Z | wandb/vendor/graphql-core-1.1/graphql/pyutils/version.py | theodumont/client | 7402ac67ada5bc8078078a49fd3e0cb4b6172307 | [
"MIT"
] | 351 | 2018-04-08T19:39:34.000Z | 2022-03-30T19:38:08.000Z | from __future__ import unicode_literals
import datetime
import os
import subprocess
def get_version(version=None):
    """Return a PEP 440-compliant version number built from VERSION."""
    version = get_complete_version(version)
    # The version number is main + suffix, where:
    #   main   = X.Y[.Z]
    #   suffix = .devN          for pre-alpha snapshots (N = git timestamp)
    #          | {a|b|rc}N      for alpha, beta, and release-candidate builds
    main = get_main_version(version)
    status, number = version[3], version[4]
    suffix = ''
    if status == 'alpha' and number == 0:
        changeset = get_git_changeset()
        suffix = '.dev%s' % changeset if changeset else '.dev'
    elif status != 'final':
        suffix = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}[status] + str(number)
    return str(main + suffix)
def get_main_version(version=None):
    """Return the X.Y[.Z] part of VERSION, omitting Z when it is zero."""
    version = get_complete_version(version)
    significant = version[:2] if version[2] == 0 else version[:3]
    return '.'.join(str(part) for part in significant)
def get_complete_version(version=None):
    """Return the 5-tuple graphql version.

    When *version* is supplied it is validated (length 5, release status one
    of alpha/beta/rc/final) and returned unchanged; otherwise the package's
    own VERSION tuple is returned.
    """
    if version is not None:
        assert len(version) == 5
        assert version[3] in ('alpha', 'beta', 'rc', 'final')
        return version
    # Imported lazily to avoid a circular import at module load time.
    from graphql import VERSION
    return VERSION
def get_docs_version(version=None):
    """Return the documentation version: 'dev' for pre-releases, else 'X.Y'."""
    version = get_complete_version(version)
    return 'dev' if version[3] != 'final' else '%d.%d' % version[:2]
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.

    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.

    Returns None when the timestamp cannot be determined (not a git checkout,
    git not installed, or unparseable output).
    """
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    try:
        git_log = subprocess.Popen(
            'git log --pretty=format:%ct --quiet -1 HEAD',
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            shell=True, cwd=repo_dir, universal_newlines=True,
        )
        timestamp = git_log.communicate()[0]
        timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
    # Narrowed from a bare 'except:', which also swallowed
    # KeyboardInterrupt and SystemExit.
    except (OSError, ValueError, OverflowError):
        return None
    return timestamp.strftime('%Y%m%d%H%M%S')
| 30.987342 | 80 | 0.640523 | from __future__ import unicode_literals
import datetime
import os
import subprocess
def get_version(version=None):
version = get_complete_version(version)
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
else:
sub = '.dev'
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_main_version(version=None):
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
if version is None:
from graphql import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
def get_git_changeset():
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| true | true |
f7339188525147d06d219cb552b6c1f3da5b7a37 | 503 | py | Python | creten/indicators/StdDev.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 9 | 2019-12-17T10:42:40.000Z | 2021-12-02T23:07:05.000Z | creten/indicators/StdDev.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | null | null | null | creten/indicators/StdDev.py | nardew/Creten | 15ddb0b52e6f2afec2c79b3c731fccb34a2c63d6 | [
"MIT"
] | 6 | 2019-03-04T15:01:10.000Z | 2022-01-12T23:22:55.000Z | from indicators.SingleValueIndicator import SingleValueIndicator
from math import sqrt
class StdDev(SingleValueIndicator):
    """Rolling population standard deviation over the last ``period`` samples."""

    def __init__(self, period, timeSeries = None):
        super(StdDev, self).__init__()
        self.period = period
        self.initialize(timeSeries)

    def _calculate(self):
        # Not enough samples accumulated yet -- emit nothing.
        if len(self.timeSeries) < self.period:
            return
        window = self.timeSeries[-self.period:]
        average = sum(window) / self.period
        variance = sum((sample - average) ** 2 for sample in window) / self.period
        self.values.append(sqrt(variance))
from math import sqrt
class StdDev(SingleValueIndicator):
def __init__(self, period, timeSeries = None):
super(StdDev, self).__init__()
self.period = period
self.initialize(timeSeries)
def _calculate(self):
if len(self.timeSeries) < self.period:
return
mean = sum(self.timeSeries[-self.period:]) / self.period
self.values.append(sqrt(sum([(item - mean)**2 for item in self.timeSeries[-self.period:]]) / self.period)) | true | true |
f7339194cf76cb7005d56a755ff75b834296c7fd | 21,029 | py | Python | twisted/python/compat.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | null | null | null | twisted/python/compat.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | null | null | null | twisted/python/compat.py | hawkowl/twisted | c413aac3888dea2202c0dc26f978d7f88b4b837a | [
"Unlicense",
"MIT"
] | null | null | null | # -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
@var unicode: The type of Unicode strings, C{unicode} on Python 2 and C{str}
on Python 3.
@var NativeStringIO: An in-memory file-like object that operates on the native
string type (bytes in Python 2, unicode in Python 3).
@var urllib_parse: a URL-parsing module (urlparse on Python 2, urllib.parse on
Python 3)
"""
from __future__ import absolute_import, division
import inspect
import os
import platform
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
if platform.python_implementation() == 'PyPy':
_PYPY = True
else:
_PYPY = False
def _shouldEnableNewStyle():
"""
Returns whether or not we should enable the new-style conversion of
old-style classes. It inspects the environment for C{TWISTED_NEWSTYLE},
accepting an empty string, C{no}, C{false}, C{False}, and C{0} as falsey
values and everything else as a truthy value.
@rtype: L{bool}
"""
value = os.environ.get('TWISTED_NEWSTYLE', '')
if value in ['', 'no', 'false', 'False', '0']:
return False
else:
return True
_EXPECT_NEWSTYLE = _PY3 or _shouldEnableNewStyle()
def currentframe(n=0):
    """
    In Python 3, L{inspect.currentframe} does not take a stack-level argument.
    Restore that functionality from Python 2 so we don't have to re-implement
    the C{f_back}-walking loop in places where it's called.

    @param n: The number of stack levels above the caller to walk.
    @type n: L{int}

    @return: a frame, n levels up the stack from the caller.
    @rtype: L{types.FrameType}
    """
    frame = inspect.currentframe()
    # Walk n+1 links: one to leave this helper itself, then n more.
    steps = n + 1
    while steps:
        frame = frame.f_back
        steps -= 1
    return frame
def inet_pton(af, addr):
    """
    Pure-Python fallback for C{socket.inet_pton}, used on platforms whose
    socket module lacks IPv6 support.

    @param af: the address family, C{socket.AF_INET} or C{socket.AF_INET6}.
    @param addr: the presentation (string) form of the address.

    @return: the packed network-order binary form of the address.
    @raise ValueError: if C{addr} is not a valid address for C{af}.
    @raise socket.error: if C{af} is not a supported address family.
    """
    if af == socket.AF_INET:
        return socket.inet_aton(addr)
    elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        # Fixed: the original interpolated the comprehension variable 'x'
        # after the comprehension ended -- a NameError on Python 3 (list
        # comprehensions no longer leak their variable).  Report the actual
        # offending characters instead.
        illegalChars = [x for x in addr if x not in string.hexdigits + ':.']
        if illegalChars:
            raise ValueError("Illegal characters: %r" % (''.join(illegalChars),))

        parts = addr.split(':')
        elided = parts.count('')
        ipv4Component = '.' in parts[-1]

        if len(parts) > (8 - ipv4Component) or elided > 3:
            raise ValueError("Syntactically invalid address")

        if elided == 3:
            # The address is exactly '::' -- all zeroes.  (b'' so the result
            # is bytes on Python 3 as well; on Python 2 bytes is str.)
            return b'\x00' * 16

        if elided:
            # Expand the '::' elision into the right number of zero groups.
            zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
            if addr.startswith('::'):
                parts[:2] = zeros
            elif addr.endswith('::'):
                parts[-2:] = zeros
            else:
                idx = parts.index('')
                parts[idx:idx+1] = zeros
            if len(parts) != 8 - ipv4Component:
                raise ValueError("Syntactically invalid address")
        else:
            if len(parts) != (8 - ipv4Component):
                raise ValueError("Syntactically invalid address")

        if ipv4Component:
            # Trailing dotted-quad form, e.g. '::ffff:1.2.3.4': convert the
            # IPv4 part into two 16-bit hex groups.
            if parts[-1].count('.') != 3:
                raise ValueError("Syntactically invalid address")
            rawipv4 = socket.inet_aton(parts[-1])
            unpackedipv4 = struct.unpack('!HH', rawipv4)
            parts[-1:] = [hex(x)[2:] for x in unpackedipv4]

        parts = [int(x, 16) for x in parts]
        return struct.pack('!8H', *parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
    """
    Pure-Python fallback for C{socket.inet_ntop}, used on platforms whose
    socket module lacks IPv6 support.

    For AF_INET6 the longest run of zero groups is compressed with '::',
    per the usual presentation rules.

    @param af: the address family, C{socket.AF_INET} or C{socket.AF_INET6}.
    @param addr: the packed network-order binary form of the address.

    @return: the presentation (string) form of the address.
    @raise ValueError: if C{addr} has the wrong length for C{af}.
    @raise socket.error: if C{af} is not a supported address family.
    """
    if af == socket.AF_INET:
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise ValueError("address length incorrect")
        parts = struct.unpack('!8H', addr)
        curBase = bestBase = None
        curLen = bestLen = 0
        for i in range(8):
            if not parts[i]:
                # Inside a run of zero groups: start/extend the current run.
                if curBase is None:
                    curBase = i
                    curLen = 0
                curLen += 1
            else:
                if curBase is not None:
                    # Fixed: the original reset bestLen to None just before
                    # this comparison, so every later zero run displaced the
                    # recorded best run ('curLen > None' is also a TypeError
                    # on Python 3).  Only replace when strictly longer.
                    if bestBase is None or curLen > bestLen:
                        bestBase = curBase
                        bestLen = curLen
                    curBase = None
        # A run extending to the end of the address is closed here.
        if curBase is not None and (bestBase is None or curLen > bestLen):
            bestBase = curBase
            bestLen = curLen
        parts = [hex(x)[2:] for x in parts]
        if bestBase is not None:
            parts[bestBase:bestBase + bestLen] = ['']
        # A leading or trailing elision needs an extra '' so join() yields '::'.
        if parts[0] == '':
            parts.insert(0, '')
        if parts[-1] == '':
            parts.insert(len(parts) - 1, '')
        return ':'.join(parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')
# Platforms without IPv6 support (e.g. Python 2 on Windows) lack both the
# AF_INET6 constant and inet_pton/inet_ntop; patch in the fallbacks above.
try:
    socket.AF_INET6
except AttributeError:
    socket.AF_INET6 = 'AF_INET6'

try:
    socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop


adict = dict

if _PY3:
    # These are actually useless in Python 2 as well, but we need to go
    # through deprecation process there (ticket #5895):
    del adict, inet_pton, inet_ntop

# Re-export builtins that once needed compatibility shims.
set = set
frozenset = frozenset

try:
    # Python 3: reduce() moved into functools.
    from functools import reduce
except ImportError:
    reduce = reduce
def execfile(filename, globals, locals=None):
    """
    Execute a Python script in the given namespaces.

    Similar to the execfile builtin, but a namespace is mandatory, partly
    because that's a sensible thing to require, and because otherwise we'd
    have to do some frame hacking.

    This is a compatibility implementation for Python 3 porting, to avoid the
    use of the deprecated builtin C{execfile} function.
    """
    if locals is None:
        locals = globals
    # 'rb' instead of the original 'rbU': the 'U' flag has been deprecated
    # since Python 3.4 and is rejected outright by Python 3.11+, and
    # compile() accepts any newline convention in bytes source anyway.
    with open(filename, "rb") as fin:
        source = fin.read()
    code = compile(source, filename, "exec")
    exec(code, globals, locals)
try:
    # Python 2 still provides the builtin; simply re-expose it.
    cmp = cmp
except NameError:
    # Python 3 removed cmp(); supply an equivalent.
    def cmp(a, b):
        """
        Compare two objects.

        Returns a negative number if C{a < b}, zero if they are equal, and a
        positive number if C{a > b}.
        """
        # Same evaluation order as the original if/elif chain.
        return -1 if a < b else (0 if a == b else 1)
def comparable(klass):
    """
    Class decorator that ensures support for the special C{__cmp__} method.

    On Python 2 this does nothing.

    On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
    relying on C{__cmp__} to implement their comparisons.
    """
    if not _PY3:
        # Python 2 consults __cmp__ natively; nothing to add.
        return klass

    def _makeOperator(name, check):
        # Each rich comparison delegates to __cmp__, passing NotImplemented
        # through untouched so reflected comparisons still work.
        def method(self, other):
            result = self.__cmp__(other)
            if result is NotImplemented:
                return result
            return check(result)
        method.__name__ = name
        return method

    for name, check in [
        ('__eq__', lambda r: r == 0),
        ('__ne__', lambda r: r != 0),
        ('__lt__', lambda r: r < 0),
        ('__le__', lambda r: r <= 0),
        ('__gt__', lambda r: r > 0),
        ('__ge__', lambda r: r >= 0),
    ]:
        setattr(klass, name, _makeOperator(name, check))
    return klass
if _PY3:
    # On Python 3 the unicode/long types were folded into str/int.
    unicode = str
    long = int
else:
    # Re-expose the Python 2 builtins under the same module-level names.
    unicode = unicode
    long = long
def ioType(fileIshObject, default=unicode):
    """
    Determine the type which will be returned from the given file object's
    read() and accepted by its write() method as an argument.

    In other words, determine whether the given file is 'opened in text mode'.

    @param fileIshObject: Any object, but ideally one which resembles a file.
    @type fileIshObject: L{object}

    @param default: A default value to return when the type of C{fileIshObject}
        cannot be determined.
    @type default: L{type}

    @return: There are 3 possible return values:

            1. L{unicode}, if the file is unambiguously opened in text mode.

            2. L{bytes}, if the file is unambiguously opened in binary mode.

            3. L{basestring}, if we are on python 2 (the L{basestring} type
               does not exist on python 3) and the file is opened in binary
               mode, but has an encoding and can therefore accept both bytes
               and text reliably for writing, but will return L{bytes} from
               read methods.

            4. The C{default} parameter, if the given type is not understood.

    @rtype: L{type}
    """
    if isinstance(fileIshObject, TextIOBase):
        # If it's for text I/O, then it's for text I/O.
        return unicode
    if isinstance(fileIshObject, IOBase):
        # If it's for I/O but it's _not_ for text I/O, it's for bytes I/O.
        return bytes
    encoding = getattr(fileIshObject, 'encoding', None)
    # Imported here rather than at module level because only this rare
    # fallback path needs it.
    import codecs
    if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
        # On StreamReaderWriter, the 'encoding' attribute has special meaning;
        # it is unambiguously unicode.
        if encoding:
            return unicode
        else:
            return bytes
    if not _PY3:
        # Special case: if we have an encoding file, we can *give* it unicode,
        # but we can't expect to *get* unicode.
        if isinstance(fileIshObject, file):
            if encoding is not None:
                return basestring
            else:
                return bytes
        from cStringIO import InputType, OutputType
        from StringIO import StringIO
        if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
            # Python 2's in-memory string buffers hold bytes.
            return bytes
    # Unknown file-ish type: fall back to the caller-provided default.
    return default
def nativeString(s):
    """
    Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
    encoding if conversion is necessary.

    @raise UnicodeError: The input string is not ASCII encodable/decodable.
    @raise TypeError: The input is neither C{bytes} nor C{unicode}.
    """
    if not isinstance(s, (bytes, unicode)):
        raise TypeError("%r is neither bytes nor unicode" % s)
    if _PY3:
        if isinstance(s, bytes):
            return s.decode("ascii")
        # Already text: round-trip through ASCII to reject non-ASCII input.
        s.encode("ascii")
        return s
    if isinstance(s, unicode):
        return s.encode("ascii")
    # Already bytes on Python 2: verify it is pure ASCII.
    s.decode("ascii")
    return s
def _matchingString(constantString, inputString):
"""
Some functions, such as C{os.path.join}, operate on string arguments which
may be bytes or text, and wish to return a value of the same type. In
those cases you may wish to have a string constant (in the case of
C{os.path.join}, that constant would be C{os.path.sep}) involved in the
parsing or processing, that must be of a matching type in order to use
string operations on it. L{_matchingString} will take a constant string
(either L{bytes} or L{unicode}) and convert it to the same type as the
input string. C{constantString} should contain only characters from ASCII;
to ensure this, it will be encoded or decoded regardless.
@param constantString: A string literal used in processing.
@type constantString: L{unicode} or L{bytes}
@param inputString: A byte string or text string provided by the user.
@type inputString: L{unicode} or L{bytes}
@return: C{constantString} converted into the same type as C{inputString}
@rtype: the type of C{inputString}
"""
if isinstance(constantString, bytes):
otherType = constantString.decode("ascii")
else:
otherType = constantString.encode("ascii")
if type(constantString) == type(inputString):
return constantString
else:
return otherType
if _PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    # 'raise x, y, z' is a syntax error on Python 3, so the Python 2
    # spelling has to be hidden inside exec().
    exec("""def reraise(exception, traceback):
    raise exception.__class__, exception, traceback""")

reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.

Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.

@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new traceback.
"""

# The native string type differs, so the in-memory file type does too.
if _PY3:
    from io import StringIO as NativeStringIO
else:
    from io import BytesIO as NativeStringIO


# Functions for dealing with Python 3's bytes type, which is somewhat
# different than Python 2's:
if _PY3:
    def iterbytes(originalBytes):
        # Indexing bytes on Python 3 yields ints; slice to get 1-byte bytes.
        for i in range(len(originalBytes)):
            yield originalBytes[i:i+1]

    def intToBytes(i):
        return ("%d" % i).encode("ascii")

    # Ideally we would use memoryview, but it has a number of differences from
    # the Python 2 buffer() that make that impractical
    # (http://bugs.python.org/issue15945, incompatibility with pyOpenSSL due to
    # PyArg_ParseTuple differences.)
    def lazyByteSlice(object, offset=0, size=None):
        """
        Return a copy of the given bytes-like object.

        If an offset is given, the copy starts at that offset. If a size is
        given, the copy will only be of that length.

        @param object: C{bytes} to be copied.

        @param offset: C{int}, starting index of copy.

        @param size: Optional, if an C{int} is given limit the length of copy
            to this size.
        """
        if size is None:
            return object[offset:]
        else:
            return object[offset:(offset + size)]

    def networkString(s):
        if not isinstance(s, unicode):
            raise TypeError("Can only convert text to bytes on Python 3")
        return s.encode('ascii')
else:
    def iterbytes(originalBytes):
        # Iterating a Python 2 str already yields 1-byte strings.
        return originalBytes

    def intToBytes(i):
        return b"%d" % i

    # buffer() gives a cheap zero-copy slice view on Python 2.
    lazyByteSlice = buffer

    def networkString(s):
        if not isinstance(s, str):
            raise TypeError("Can only pass-through bytes on Python 2")
        # Ensure we're limited to ASCII subset:
        s.decode('ascii')
        return s

iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.

In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).

@param originalBytes: A C{bytes} object that will be wrapped.
"""

intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arab numeral.

In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.

@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""

networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.

This is useful for sending text-like bytes that are constructed using string
interpolation.  For example, this is safe on Python 2 and Python 3:

    networkString("Hello %d" % (n,))

@param s: A native string to convert to bytes if necessary.
@type s: C{str}

@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.

@rtype: C{bytes}
"""
try:
    # Python 2: the common base class of str and unicode.
    StringType = basestring
except NameError:
    # Python 3+
    StringType = str

try:
    from types import InstanceType
except ImportError:
    # Python 3+ removed old-style classes; object is the closest analogue.
    InstanceType = object

try:
    from types import FileType
except ImportError:
    # Python 3+: all file objects derive from io.IOBase.
    FileType = IOBase

# URL/cookie handling moved and was renamed between Python 2 and 3.
if _PY3:
    import urllib.parse as urllib_parse
    from html import escape
    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from http import cookiejar as cookielib
else:
    import urlparse as urllib_parse
    from cgi import escape
    from urllib import quote as urlquote
    from urllib import unquote as urlunquote
    import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
    # Python 3 dict views are already lazy; items() materializes a list to
    # match the Python 2 behavior of dict.items().
    def iteritems(d):
        return d.items()

    def itervalues(d):
        return d.values()

    def items(d):
        return list(d.items())

    xrange = range
    izip = zip
else:
    def iteritems(d):
        return d.iteritems()

    def itervalues(d):
        return d.itervalues()

    def items(d):
        return d.items()

    xrange = xrange
    from itertools import izip
    izip  # shh pyflakes

iteritems.__doc__ = """
Return an iterable of the items of C{d}.

@type d: L{dict}
@rtype: iterable
"""

itervalues.__doc__ = """
Return an iterable of the values of C{d}.

@type d: L{dict}
@rtype: iterable
"""

items.__doc__ = """
Return a list of the items of C{d}.

@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
    """
    Return a list of the keys of C{d}.

    @type d: L{dict}
    @rtype: L{list}
    """
    # Python 3 keys() returns a view; Python 2 already returns a list.
    return list(d.keys()) if _PY3 else d.keys()
def bytesEnviron():
    """
    Return a L{dict} of L{os.environ} where all text-strings are encoded into
    L{bytes}.
    """
    if not _PY3:
        # On py2 the environment is already byte strings.
        return dict(os.environ)
    encodeKey = os.environ.encodekey
    encodeValue = os.environ.encodevalue
    return {encodeKey(name): encodeValue(value)
            for name, value in os.environ.items()}
def _constructMethod(cls, name, self):
    """
    Construct a bound method.

    @param cls: The class that the method should be bound to.
    @type cls: L{types.ClassType} or L{type}.

    @param name: The name of the method.
    @type name: native L{str}

    @param self: The object that the method is bound to.
    @type self: any object

    @return: a bound method
    @rtype: L{types.MethodType}
    """
    implementation = cls.__dict__[name]
    if _PY3:
        # Python 3 bound methods no longer carry the class.
        return _MethodType(implementation, self)
    return _MethodType(implementation, self, cls)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute

from collections import OrderedDict

# OrderedDict used to be a compatibility shim here; it is now only a
# deprecated re-export of the stdlib class.
deprecatedModuleAttribute(
    Version("Twisted", 15, 5, 0),
    "Use collections.OrderedDict instead.",
    "twisted.python.compat",
    "OrderedDict")

if _PY3:
    from base64 import encodebytes as _b64encodebytes
    from base64 import decodebytes as _b64decodebytes
else:
    # encodestring/decodestring are the (deprecated) Python 2 spellings.
    from base64 import encodestring as _b64encodebytes
    from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
    """
    Like L{chr} but always works on ASCII, returning L{bytes}.

    @param i: The ASCII code point to return.
    @type i: L{int}

    @rtype: L{bytes}
    """
    # bytes([i]) on Python 3; Python 2's chr() already returns a byte string.
    return bytes([i]) if _PY3 else chr(i)
try:
    # Python 3 moved intern() into the sys module.
    from sys import intern
except ImportError:
    # Python 2: re-expose the builtin under the same module-level name.
    intern = intern
def _coercedUnicode(s):
    """
    Coerce ASCII-only byte strings into unicode for Python 2.

    In Python 2 C{unicode(b'bytes')} returns a unicode string C{'bytes'}. In
    Python 3, the equivalent C{str(b'bytes')} will return C{"b'bytes'"}
    instead. This function mimics the behavior for Python 2. It will decode the
    byte string as ASCII. In Python 3 it simply raises a L{TypeError} when
    passing a byte string. Unicode strings are returned as-is.

    @param s: The string to coerce.
    @type s: L{bytes} or L{unicode}

    @raise UnicodeError: The input L{bytes} is not ASCII decodable.
    @raise TypeError: The input is L{bytes} on Python 3.
    """
    if not isinstance(s, bytes):
        # Already text: return unchanged.
        return s
    if _PY3:
        raise TypeError("Expected str not %r (bytes)" % (s,))
    return s.decode('ascii')
if _PY3:
    # Python 3 renamed these builtins.
    unichr = chr
    raw_input = input
else:
    unichr = unichr
    raw_input = raw_input


# The explicit public API of this compatibility module.
__all__ = [
    "reraise",
    "execfile",
    "frozenset",
    "reduce",
    "set",
    "cmp",
    "comparable",
    "OrderedDict",
    "nativeString",
    "NativeStringIO",
    "networkString",
    "unicode",
    "iterbytes",
    "intToBytes",
    "lazyByteSlice",
    "StringType",
    "InstanceType",
    "FileType",
    "items",
    "iteritems",
    "itervalues",
    "xrange",
    "urllib_parse",
    "bytesEnviron",
    "escape",
    "urlquote",
    "urlunquote",
    "cookielib",
    "_keys",
    "_b64encodebytes",
    "_b64decodebytes",
    "_bytesChr",
    "_coercedUnicode",
    "intern",
    "unichr",
    "raw_input",
]
| 26.090571 | 79 | 0.629369 |
from __future__ import absolute_import, division
import inspect
import os
import platform
import socket
import string
import struct
import sys
from types import MethodType as _MethodType
from io import TextIOBase, IOBase
if sys.version_info < (3, 0):
_PY3 = False
else:
_PY3 = True
if platform.python_implementation() == 'PyPy':
_PYPY = True
else:
_PYPY = False
def _shouldEnableNewStyle():
value = os.environ.get('TWISTED_NEWSTYLE', '')
if value in ['', 'no', 'false', 'False', '0']:
return False
else:
return True
_EXPECT_NEWSTYLE = _PY3 or _shouldEnableNewStyle()
def currentframe(n=0):
f = inspect.currentframe()
for x in range(n + 1):
f = f.f_back
return f
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
if [x for x in addr if x not in string.hexdigits + ':.']:
raise ValueError("Illegal characters: %r" % (''.join(x),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
    """
    Convert a packed binary IP address to presentation format.

    Pure-Python fallback for C{socket.inet_ntop}, installed below on
    platforms whose socket module lacks IPv6 support.

    @param af: Address family; C{socket.AF_INET} or C{socket.AF_INET6}.
    @param addr: The packed address (4 bytes for IPv4, 16 for IPv6).
    @return: The address as a string, with the longest run of zero groups
        compressed with '::' for IPv6.
    @raise ValueError: If C{addr} has the wrong length.
    @raise socket.error: If C{af} is not a supported address family.
    """
    if af == socket.AF_INET:
        return socket.inet_ntoa(addr)
    elif af == socket.AF_INET6:
        if len(addr) != 16:
            raise ValueError("address length incorrect")
        parts = struct.unpack('!8H', addr)
        curBase = bestBase = None
        for i in range(8):
            if not parts[i]:
                # Inside (or starting) a run of zero groups.
                if curBase is None:
                    curBase = i
                    curLen = 0
                curLen += 1
            else:
                if curBase is not None:
                    # A zero run just ended: keep it if it is the longest so
                    # far. (A stray ``bestLen = None`` here previously
                    # clobbered the best length, raising TypeError on
                    # Python 3 and selecting the wrong run on Python 2.)
                    if bestBase is None or curLen > bestLen:
                        bestBase = curBase
                        bestLen = curLen
                    curBase = None
        # Account for a zero run that extends to the end of the address.
        if curBase is not None and (bestBase is None or curLen > bestLen):
            bestBase = curBase
            bestLen = curLen
        parts = [hex(x)[2:] for x in parts]
        if bestBase is not None:
            # Collapse the longest zero run into an empty group ('::').
            parts[bestBase:bestBase + bestLen] = ['']
        if parts[0] == '':
            parts.insert(0, '')
        if parts[-1] == '':
            parts.insert(len(parts) - 1, '')
        return ':'.join(parts)
    else:
        raise socket.error(97, 'Address family not supported by protocol')
# Guarantee that the name socket.AF_INET6 exists, even on builds compiled
# without IPv6 support, so the fallbacks above can be installed and compared
# against.
try:
    socket.AF_INET6
except AttributeError:
    socket.AF_INET6 = 'AF_INET6'
# Probe the native implementation: if inet_pton is missing or cannot handle
# an IPv6 address, install the pure-Python fallbacks defined above.
try:
    socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
    socket.inet_pton = inet_pton
    socket.inet_ntop = inet_ntop
# Legacy alias for dict, kept for backwards compatibility on Python 2.
adict = dict

if _PY3:
    # On Python 3 the stdlib provides working inet_pton/inet_ntop and dict,
    # so drop the private fallbacks from the module namespace. (This line
    # was garbled to ``l adict, ...`` in the source, a syntax error;
    # restored from upstream Twisted.)
    del adict, inet_pton, inet_ntop

# set/frozenset are builtins on all supported versions; re-exported for
# callers that historically imported them from this module.
set = set
frozenset = frozenset

try:
    # reduce() moved to functools on Python 3 (and exists there on 2.6+).
    from functools import reduce
except ImportError:
    reduce = reduce
def execfile(filename, globals, locals=None):
    """
    Execute a Python script in the given namespaces.

    Python 3 replacement for the Python 2 ``execfile`` builtin.

    @param filename: path of the script to execute.
    @param globals: global namespace dict in which to execute.
    @param locals: local namespace dict; defaults to C{globals}.
    """
    if locals is None:
        locals = globals
    # The 'U' (universal newlines) flag was removed from open() in
    # Python 3.9+, so the original "rbU" mode raises ValueError there;
    # compile() accepts any newline convention in the raw bytes, so plain
    # binary mode is sufficient.
    with open(filename, "rb") as fin:
        source = fin.read()
    code = compile(source, filename, "exec")
    exec(code, globals, locals)
try:
    # Python 2 ships a builtin cmp(); keep it as-is.
    cmp = cmp
except NameError:
    # Python 3 removed it; provide an equivalent.
    def cmp(a, b):
        """
        Compare two objects.
        Returns a negative number if C{a < b}, zero if they are equal, and a
        positive number if C{a > b}.
        """
        if a < b:
            return -1
        if a == b:
            return 0
        return 1
def comparable(klass):
    """
    Class decorator that ensures support for the special C{__cmp__} method.

    On Python 2 this is a no-op (C{__cmp__} is honoured natively); on
    Python 3 it synthesises the six rich-comparison methods from
    C{__cmp__}, each returning C{NotImplemented} whenever C{__cmp__} does.

    @param klass: the class to decorate.
    @return: the same class, possibly augmented.
    """
    if not _PY3:
        return klass

    def _comparison(name, interpret):
        # Build one rich-comparison method that delegates to __cmp__ and
        # interprets its integer result.
        def method(self, other):
            outcome = self.__cmp__(other)
            if outcome is NotImplemented:
                return outcome
            return interpret(outcome)
        method.__name__ = name
        return method

    for name, interpret in [
        ('__eq__', lambda c: c == 0),
        ('__ne__', lambda c: c != 0),
        ('__lt__', lambda c: c < 0),
        ('__le__', lambda c: c <= 0),
        ('__gt__', lambda c: c > 0),
        ('__ge__', lambda c: c >= 0),
    ]:
        setattr(klass, name, _comparison(name, interpret))
    return klass
try:
    # Python 2: re-export the builtin names unchanged.
    unicode = unicode
    long = long
except NameError:
    # Python 3: the closest equivalents.
    unicode = str
    long = int
def ioType(fileIshObject, default=unicode):
    """
    Determine the type produced by the given file object's read() and
    accepted by its write() method — i.e. whether it is 'opened in text
    mode'.

    @param fileIshObject: Any object, ideally one resembling a file.
    @param default: Value returned when the type cannot be determined.
    @return: C{unicode}, C{bytes}, C{basestring} (Python 2 encoding-aware
        files only), or C{default}.
    """
    # Python 3 io hierarchy: text streams subclass TextIOBase; any other
    # IOBase subclass deals in bytes.  Order matters: TextIOBase is itself
    # an IOBase, so it must be checked first.
    if isinstance(fileIshObject, TextIOBase):
        return unicode
    if isinstance(fileIshObject, IOBase):
        return bytes
    encoding = getattr(fileIshObject, 'encoding', None)
    import codecs
    if isinstance(fileIshObject, (codecs.StreamReader, codecs.StreamWriter)):
        # On StreamReaderWriter, the 'encoding' attribute has special meaning;
        # it is unambiguously unicode.
        if encoding:
            return unicode
        else:
            return bytes
    if not _PY3:
        # Special case: if we have an encoding file, we can *give* it unicode,
        # but we can't expect to *get* unicode.
        if isinstance(fileIshObject, file):
            if encoding is not None:
                return basestring
            else:
                return bytes
        # Python 2 in-memory string buffers always hold bytes.
        from cStringIO import InputType, OutputType
        from StringIO import StringIO
        if isinstance(fileIshObject, (StringIO, InputType, OutputType)):
            return bytes
    return default
def nativeString(s):
    """
    Convert C{bytes} or C{unicode} to the native C{str} type, using ASCII
    encoding if conversion is necessary.

    @raise UnicodeError: the input is not ASCII encodable/decodable.
    @raise TypeError: the input is neither C{bytes} nor C{unicode}.
    """
    if not isinstance(s, (bytes, unicode)):
        raise TypeError("%r is neither bytes nor unicode" % s)
    if _PY3:
        if isinstance(s, bytes):
            return s.decode("ascii")
        # Already text: encode purely to verify it is ASCII-only.
        s.encode("ascii")
        return s
    if isinstance(s, unicode):
        return s.encode("ascii")
    # Already a byte string: decode purely to verify it is ASCII-only.
    s.decode("ascii")
    return s
def _matchingString(constantString, inputString):
    """
    Return a version of the (ASCII-only) C{constantString} with the same
    type — bytes or text — as C{inputString}.
    """
    # Convert eagerly, like the original, so non-ASCII constants fail fast
    # regardless of which branch is ultimately returned.
    if isinstance(constantString, bytes):
        converted = constantString.decode("ascii")
    else:
        converted = constantString.encode("ascii")
    if type(constantString) == type(inputString):
        return constantString
    return converted
# reraise: re-raise an exception with a traceback.  The Python 2 variant is
# hidden inside exec() because its three-argument raise statement is a
# syntax error on Python 3.
if _PY3:
    def reraise(exception, traceback):
        raise exception.with_traceback(traceback)
else:
    exec("""def reraise(exception, traceback):
    raise exception.__class__, exception, traceback""")
reraise.__doc__ = """
Re-raise an exception, with an optional traceback, in a way that is compatible
with both Python 2 and Python 3.
Note that on Python 3, re-raised exceptions will be mutated, with their
C{__traceback__} attribute being set.
@param exception: The exception instance.
@param traceback: The traceback to use, or L{None} indicating a new traceback.
"""
# A file-like object operating on native strings: text on Python 3, bytes
# on Python 2.
if _PY3:
    from io import StringIO as NativeStringIO
else:
    from io import BytesIO as NativeStringIO
# different than Python 2's:
if _PY3:
    # On Python 3, indexing bytes yields ints, so iterate with slices of
    # length 1 to preserve the Python 2 byte-string behaviour.
    def iterbytes(originalBytes):
        for i in range(len(originalBytes)):
            yield originalBytes[i:i+1]
    def intToBytes(i):
        return ("%d" % i).encode("ascii")
    def lazyByteSlice(object, offset=0, size=None):
        if size is None:
            return object[offset:]
        else:
            return object[offset:(offset + size)]
    def networkString(s):
        if not isinstance(s, unicode):
            raise TypeError("Can only convert text to bytes on Python 3")
        return s.encode('ascii')
else:
    # Python 2: bytes already iterate byte-wise, and buffer() provides
    # zero-copy slicing.
    def iterbytes(originalBytes):
        return originalBytes
    def intToBytes(i):
        return b"%d" % i
    lazyByteSlice = buffer
    def networkString(s):
        if not isinstance(s, str):
            raise TypeError("Can only pass-through bytes on Python 2")
        s.decode('ascii')
        return s
# Shared docstrings for the version-specific implementations above.
iterbytes.__doc__ = """
Return an iterable wrapper for a C{bytes} object that provides the behavior of
iterating over C{bytes} on Python 2.
In particular, the results of iteration are the individual bytes (rather than
integers as on Python 3).
@param originalBytes: A C{bytes} object that will be wrapped.
"""
intToBytes.__doc__ = """
Convert the given integer into C{bytes}, as ASCII-encoded Arab numeral.
In other words, this is equivalent to calling C{bytes} in Python 2 on an
integer.
@param i: The C{int} to convert to C{bytes}.
@rtype: C{bytes}
"""
networkString.__doc__ = """
Convert the native string type to C{bytes} if it is not already C{bytes} using
ASCII encoding if conversion is necessary.
This is useful for sending text-like bytes that are constructed using string
interpolation. For example, this is safe on Python 2 and Python 3:
networkString("Hello %d" % (n,))
@param s: A native string to convert to bytes if necessary.
@type s: C{str}
@raise UnicodeError: The input string is not ASCII encodable/decodable.
@raise TypeError: The input is neither C{bytes} nor C{unicode}.
@rtype: C{bytes}
"""
# The native base string type: basestring on Python 2, str on Python 3.
try:
    StringType = basestring
except NameError:
    # Python 3+
    StringType = str
try:
    from types import InstanceType
except ImportError:
    # Python 3+
    InstanceType = object
try:
    from types import FileType
except ImportError:
    # Python 3+
    FileType = IOBase
# URL, HTML and cookie helpers were reorganised between Python 2 and 3.
if _PY3:
    import urllib.parse as urllib_parse
    from html import escape
    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from http import cookiejar as cookielib
else:
    import urlparse as urllib_parse
    from cgi import escape
    from urllib import quote as urlquote
    from urllib import unquote as urlunquote
    import cookielib
# Dealing with the differences in items/iteritems
if _PY3:
    def iteritems(d):
        return d.items()
    def itervalues(d):
        return d.values()
    def items(d):
        return list(d.items())
    xrange = range
    izip = zip
else:
    def iteritems(d):
        return d.iteritems()
    def itervalues(d):
        return d.itervalues()
    def items(d):
        return d.items()
    xrange = xrange
    from itertools import izip
    izip # shh pyflakes
# Shared docstrings for the version-specific helpers above.
iteritems.__doc__ = """
Return an iterable of the items of C{d}.
@type d: L{dict}
@rtype: iterable
"""
itervalues.__doc__ = """
Return an iterable of the values of C{d}.
@type d: L{dict}
@rtype: iterable
"""
items.__doc__ = """
Return a list of the items of C{d}.
@type d: L{dict}
@rtype: L{list}
"""
def _keys(d):
    """
    Return a list of the keys of C{d}, on both Python 2 and Python 3
    (where ``dict.keys()`` is a view, not a list).
    """
    return list(d.keys()) if _PY3 else d.keys()
def bytesEnviron():
    """
    Return a copy of L{os.environ} with all the keys and values encoded
    to L{bytes}.  On Python 2 the environment is already byte strings, so
    a plain copy is returned.
    """
    if not _PY3:
        # On py2, nothing to do.
        return dict(os.environ)
    encodekey = os.environ.encodekey
    encodevalue = os.environ.encodevalue
    return {encodekey(key): encodevalue(value)
            for key, value in os.environ.items()}
def _constructMethod(cls, name, self):
    """
    Construct a bound method of C{self} from the function stored under
    C{name} in C{cls.__dict__}.  Python 2's MethodType takes the class as
    a third argument; Python 3's does not.
    """
    func = cls.__dict__[name]
    args = (func, self) if _PY3 else (func, self, cls)
    return _MethodType(*args)
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from collections import OrderedDict
# OrderedDict used to be defined here; re-export it with a deprecation
# warning pointing at the stdlib implementation.
deprecatedModuleAttribute(
    Version("Twisted", 15, 5, 0),
    "Use collections.OrderedDict instead.",
    "twisted.python.compat",
    "OrderedDict")
# base64's encodestring/decodestring were renamed to
# encodebytes/decodebytes on Python 3.
if _PY3:
    from base64 import encodebytes as _b64encodebytes
    from base64 import decodebytes as _b64decodebytes
else:
    from base64 import encodestring as _b64encodebytes
    from base64 import decodestring as _b64decodebytes
def _bytesChr(i):
    """
    Like C{chr}, but always returning a single-byte L{bytes} value.

    @param i: The code point (0-255) to convert.
    @type i: L{int}
    @rtype: L{bytes}
    """
    if _PY3:
        return bytes([i])
    else:
        return chr(i)
# intern() moved from the builtins to the sys module on Python 3.
try:
    from sys import intern
except ImportError:
    intern = intern
def _coercedUnicode(s):
    """
    Coerce ASCII C{bytes} to text on Python 2; reject C{bytes} outright on
    Python 3.  Text input is returned unchanged.
    """
    if not isinstance(s, bytes):
        return s
    if _PY3:
        raise TypeError("Expected str not %r (bytes)" % (s,))
    return s.decode('ascii')
# Python 2's unichr/raw_input became chr/input on Python 3.
if _PY3:
    unichr = chr
    raw_input = input
else:
    unichr = unichr
    raw_input = raw_input
# Public API of this compatibility module.
__all__ = [
    "reraise",
    "execfile",
    "frozenset",
    "reduce",
    "set",
    "cmp",
    "comparable",
    "OrderedDict",
    "nativeString",
    "NativeStringIO",
    "networkString",
    "unicode",
    "iterbytes",
    "intToBytes",
    "lazyByteSlice",
    "StringType",
    "InstanceType",
    "FileType",
    "items",
    "iteritems",
    "itervalues",
    "xrange",
    "urllib_parse",
    "bytesEnviron",
    "escape",
    "urlquote",
    "urlunquote",
    "cookielib",
    "_keys",
    "_b64encodebytes",
    "_b64decodebytes",
    "_bytesChr",
    "_coercedUnicode",
    "intern",
    "unichr",
    "raw_input",
]
| true | true |
f73391bf5a7e433e31d3b050efc800afce4dbb19 | 1,202 | py | Python | python/saddle-points/saddle_points.py | tamireinhorn/exercism | 3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e | [
"MIT"
] | null | null | null | python/saddle-points/saddle_points.py | tamireinhorn/exercism | 3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e | [
"MIT"
] | 2 | 2021-12-18T16:31:51.000Z | 2021-12-18T16:33:33.000Z | python/saddle-points/saddle_points.py | tamireinhorn/Exercism | 3a3d5744e88ab4457df4e6ac20d772d8c50c43da | [
"MIT"
] | null | null | null | from copy import copy
def saddle_points(matrix):
    """Return all saddle points of ``matrix`` as 1-based row/column dicts.

    A saddle point is a value that is the largest in its row and the
    smallest in its column (ties included).

    :param matrix: list of equal-length rows.
    :raises ValueError: if the rows have differing lengths.
    """
    if not matrix:
        return []
    if len(set(map(len, matrix))) != 1:
        raise ValueError('irregular matrix')
    saddles = []
    # zip(*matrix) iterates the columns directly, replacing the original
    # double-reversal/destructive-pop transposition.
    for column_index, column in enumerate(zip(*matrix)):
        column_min = min(column)
        for row_index, value in enumerate(column):
            # Smallest in its column AND largest in its row -> saddle.
            if value == column_min and max(matrix[row_index]) == value:
                saddles.append({"row": row_index + 1, "column": column_index + 1})
    return saddles
| 44.518519 | 137 | 0.634775 | from copy import copy
def saddle_points(matrix):
    """Return all saddle points of ``matrix`` as 1-based row/column dicts.

    A saddle point is a value that is the largest in its row and the
    smallest in its column (ties included).

    :param matrix: list of equal-length rows.
    :raises ValueError: if the rows have differing lengths.
    """
    if not matrix:
        return []
    if len(set(map(len, matrix))) != 1:
        raise ValueError('irregular matrix')
    saddles = []
    # zip(*matrix) iterates the columns directly, replacing the original
    # double-reversal/destructive-pop transposition.
    for column_index, column in enumerate(zip(*matrix)):
        column_min = min(column)
        for row_index, value in enumerate(column):
            # Smallest in its column AND largest in its row -> saddle.
            if value == column_min and max(matrix[row_index]) == value:
                saddles.append({"row": row_index + 1, "column": column_index + 1})
    return saddles
| true | true |
f7339293297efa587024ab7b9ae02ecf9e0013db | 14,011 | py | Python | platypush/plugins/media/mpv.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/plugins/media/mpv.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | platypush/plugins/media/mpv.py | RichardChiang/platypush | 1777ebb0516118cdef20046a92caab496fa7c6cb | [
"MIT"
] | null | null | null | import os
import threading
from platypush.context import get_bus
from platypush.plugins.media import PlayerState, MediaPlugin
from platypush.message.event.media import MediaPlayEvent, MediaPlayRequestEvent, \
MediaPauseEvent, MediaStopEvent, NewPlayingMediaEvent, MediaSeekEvent
from platypush.plugins import action
class MediaMpvPlugin(MediaPlugin):
    """
    Plugin to control MPV instances.

    Requires:

        * **python-mpv** (``pip install python-mpv``)
        * **mpv** executable on your system
    """

    # Options always passed to mpv unless overridden per instance/call.
    _default_mpv_args = {
        'ytdl': True,
        'start_event_thread': True,
    }

    def __init__(self, args=None, *argv, **kwargs):
        """
        Create the MPV wrapper.

        :param args: Default arguments that will be passed to the mpv executable
            as a key-value dict (names without the `--` prefix). See `man mpv`
            for available options.
        :type args: dict[str, str]
        """
        super().__init__(*argv, **kwargs)

        # Copy the defaults: updating the shared class-level dict in place
        # would leak per-instance options to every other instance.
        self.args = self._default_mpv_args.copy()
        if args:
            # noinspection PyTypeChecker
            self.args.update(args)

        self._player = None
        self._playback_rebounce_event = threading.Event()
        self._on_stop_callbacks = []

    def _init_mpv(self, args=None):
        """
        Spawn the mpv player instance.

        :param args: Extra mpv options for this instance, merged over the
            configured defaults.
        """
        import mpv

        mpv_args = self.args.copy()
        if args:
            mpv_args.update(args)

        # Propagate the plugin's configured environment to the mpv process.
        for k, v in self._env.items():
            os.environ[k] = v

        self._player = mpv.MPV(**mpv_args)
        # noinspection PyProtectedMember
        self._player._event_callbacks += [self._event_callback()]

    @staticmethod
    def _post_event(evt_type, **evt):
        """Post a media event of the given type on the application bus."""
        bus = get_bus()
        bus.post(evt_type(player='local', plugin='media.mpv', **evt))

    def _event_callback(self):
        """
        Return the callback that translates mpv player events into
        platypush media events.
        """
        def callback(event):
            from mpv import MpvEventID as Event
            from mpv import MpvEventEndFile as EndFile

            self.logger.info('Received mpv event: {}'.format(event))
            evt = event.get('event_id')
            if not evt:
                return

            if (evt == Event.FILE_LOADED or evt == Event.START_FILE) and self._get_current_resource():
                self._playback_rebounce_event.set()
                self._post_event(NewPlayingMediaEvent, resource=self._get_current_resource(),
                                 title=self._player.filename)
            elif evt == Event.PLAYBACK_RESTART:
                self._playback_rebounce_event.set()
            elif evt == Event.PAUSE:
                self._post_event(MediaPauseEvent, resource=self._get_current_resource(), title=self._player.filename)
            elif evt == Event.UNPAUSE:
                self._post_event(MediaPlayEvent, resource=self._get_current_resource(), title=self._player.filename)
            elif evt == Event.SHUTDOWN or (
                    evt == Event.END_FILE and event.get('event', {}).get('reason') in
                    [EndFile.EOF_OR_INIT_FAILURE, EndFile.ABORTED, EndFile.QUIT]):
                # An END_FILE immediately followed by a PLAYBACK_RESTART is
                # a seek/reload "rebounce", not an actual stop.
                playback_rebounced = self._playback_rebounce_event.wait(timeout=0.5)
                if playback_rebounced:
                    self._playback_rebounce_event.clear()
                    return

                self._player = None
                self._post_event(MediaStopEvent)

                for cbk in self._on_stop_callbacks:
                    cbk()
            elif evt == Event.SEEK:
                self._post_event(MediaSeekEvent, position=self._player.playback_time)

        return callback

    @action
    def execute(self, cmd, **args):
        """
        Execute a raw mpv command.
        """
        if not self._player:
            return None, 'No mpv instance is running'

        return self._player.command(cmd, *args)

    @action
    def play(self, resource, subtitles=None, **args):
        """
        Play a resource.

        :param resource: Resource to play - can be a local file or a remote URL
        :type resource: str

        :param subtitles: Path to optional subtitle file
        :type subtitles: str

        :param args: Extra runtime arguments that will be passed to the
            mpv executable as a key-value dict (keys without `--` prefix)
        :type args: dict[str,str]
        """
        get_bus().post(MediaPlayRequestEvent(resource=resource))

        # Resolve the subtitles *before* spawning the player: previously
        # ``sub_file`` was added to ``args`` after ``_init_mpv`` had already
        # consumed them, so the subtitles never reached mpv.
        if subtitles:
            args['sub_file'] = self.get_subtitles_file(subtitles)

        self._init_mpv(args)

        resource = self._get_resource(resource)
        if resource.startswith('file://'):
            resource = resource[7:]

        assert self._player, 'The player is not ready'
        self._player.play(resource)

        if self.volume:
            self.set_volume(volume=self.volume)

        return self.status()

    @action
    def pause(self):
        """ Toggle the paused state """
        if not self._player:
            return None, 'No mpv instance is running'

        self._player.pause = not self._player.pause
        return self.status()

    @action
    def quit(self):
        """ Stop and quit the player """
        self._stop_torrent()
        if not self._player:
            return None, 'No mpv instance is running'

        self._player.quit()
        self._player.terminate()
        self._player = None
        return {'state': PlayerState.STOP.value}

    @action
    def stop(self):
        """ Stop and quit the player (alias for :meth:`.quit`) """
        return self.quit()

    @action
    def voldown(self, step=10.0):
        """ Volume down by (default: 10)% """
        if not self._player:
            return None, 'No mpv instance is running'
        return self.set_volume(self._player.volume - step)

    @action
    def volup(self, step=10.0):
        """ Volume up by (default: 10)% """
        if not self._player:
            return None, 'No mpv instance is running'
        return self.set_volume(self._player.volume + step)

    @action
    def set_volume(self, volume):
        """
        Set the volume.

        :param volume: Volume value between 0 and 100
        :type volume: float
        """
        if not self._player:
            return None, 'No mpv instance is running'

        # Clamp the requested value to [0, volume_max].
        volume = max(0, min([self._player.volume_max, volume]))
        self._player.volume = volume
        return self.status()

    @action
    def seek(self, position):
        """
        Seek to the given position, expressed in seconds from the beginning
        of the media (clamped between 0 and the media duration).

        :param position: Target position in seconds
        :type position: int
        """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = min(self._player.time_pos + self._player.time_remaining,
                  max(0, position))
        self._player.time_pos = pos
        return self.status()

    @action
    def back(self, offset=60.0):
        """ Back by (default: 60) seconds """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = max(0, self._player.time_pos - offset)
        return self.seek(pos)

    @action
    def forward(self, offset=60.0):
        """ Forward by (default: 60) seconds """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = min(self._player.time_pos + self._player.time_remaining,
                  self._player.time_pos + offset)
        return self.seek(pos)

    @action
    def next(self):
        """ Play the next item in the queue """
        if not self._player:
            return None, 'No mpv instance is running'
        self._player.playlist_next()

    @action
    def prev(self):
        """ Play the previous item in the queue """
        if not self._player:
            return None, 'No mpv instance is running'
        self._player.playlist_prev()

    @action
    def toggle_subtitles(self, visible=None):
        """
        Toggle the subtitles visibility.

        :param visible: Currently unused, kept for interface compatibility;
            the visibility is always toggled regardless of its value.
        """
        return self.toggle_property('sub_visibility')

    @action
    def add_subtitles(self, filename):
        """
        Add a subtitles file.

        :param filename: Path of the subtitles file
        """
        if not self._player:
            return None, 'No mpv instance is running'
        return self._player.sub_add(filename)

    @action
    def remove_subtitles(self, sub_id=None):
        """
        Hide the subtitles, or remove a subtitles track.

        The original implementation declared ``remove_subtitles`` twice
        (once taking a track id, once merely hiding the subtitles); the
        second definition silently shadowed the first. Both behaviours are
        merged here.

        :param sub_id: Optional id of the subtitles track to remove. If not
            specified, the subtitles are simply hidden.
        """
        if not self._player:
            return None, 'No mpv instance is running'
        if sub_id is not None:
            return self._player.sub_remove(sub_id)
        self._player.sub_visibility = False

    @action
    def toggle_fullscreen(self):
        """ Toggle the fullscreen mode """
        return self.toggle_property('fullscreen')

    # noinspection PyShadowingBuiltins
    @action
    def toggle_property(self, property):
        """
        Toggle or sets the value of an mpv property (e.g. fullscreen,
        sub_visibility etc.). See ``man mpv`` for a full list of properties

        :param property: Property to toggle
        """
        if not self._player:
            return None, 'No mpv instance is running'

        if not hasattr(self._player, property):
            self.logger.warning('No such mpv property: {}'.format(property))

        value = not getattr(self._player, property)
        setattr(self._player, property, value)
        return {property: value}

    # noinspection PyShadowingBuiltins
    @action
    def get_property(self, property):
        """
        Get a player property (e.g. pause, fullscreen etc.). See
        ``man mpv`` for a full list of the available properties
        """
        if not self._player:
            return None, 'No mpv instance is running'
        return getattr(self._player, property)

    @action
    def set_property(self, **props):
        """
        Set the value of an mpv property (e.g. fullscreen, sub_visibility
        etc.). See ``man mpv`` for a full list of properties

        :param props: Key-value args for the properties to set
        :type props: dict
        """
        if not self._player:
            return None, 'No mpv instance is running'

        for k, v in props.items():
            setattr(self._player, k, v)
        return props

    @action
    def set_subtitles(self, filename, *args, **kwargs):
        """ Sets media subtitles from filename """
        # NOTE(review): mpv's option for an extra subtitles file is
        # ``sub-file``; confirm that ``subfile`` is honoured by the
        # installed python-mpv version.
        # noinspection PyTypeChecker
        return self.set_property(subfile=filename, sub_visibility=True)

    @action
    def is_playing(self):
        """
        :returns: True if it's playing, False otherwise
        """
        if not self._player:
            return False
        return not self._player.pause

    @action
    def load(self, resource, **args):
        """
        Load/queue a resource/video to the player
        """
        if not self._player:
            return self.play(resource, **args)
        return self._player.loadfile(resource, mode='append-play')

    @action
    def mute(self):
        """ Toggle mute state """
        if not self._player:
            return None, 'No mpv instance is running'
        mute = not self._player.mute
        self._player.mute = mute
        return {'muted': mute}

    @action
    def set_position(self, position):
        """
        Seek to the specified absolute position (same as ``seek``)
        """
        return self.seek(position)

    @action
    def status(self):
        """
        Get the current player state.

        :returns: A dictionary containing the current state.

        Example::

            output = {
                "filename": "filename or stream URL",
                "state": "play"  # or "stop" or "pause"
            }
        """
        if not self._player or not hasattr(self._player, 'pause'):
            return {'state': PlayerState.STOP.value}

        return {
            'audio_channels': getattr(self._player, 'audio_channels'),
            'audio_codec': getattr(self._player, 'audio_codec_name'),
            'delay': getattr(self._player, 'delay'),
            'duration': getattr(self._player, 'playback_time', 0) + getattr(self._player, 'playtime_remaining', 0)
            if getattr(self._player, 'playtime_remaining') else None,
            'filename': getattr(self._player, 'filename'),
            'file_size': getattr(self._player, 'file_size'),
            'fullscreen': getattr(self._player, 'fs'),
            'mute': getattr(self._player, 'mute'),
            'name': getattr(self._player, 'name'),
            'pause': getattr(self._player, 'pause'),
            'percent_pos': getattr(self._player, 'percent_pos'),
            'position': getattr(self._player, 'playback_time'),
            'seekable': getattr(self._player, 'seekable'),
            'state': (PlayerState.PAUSE.value if self._player.pause else PlayerState.PLAY.value),
            'title': getattr(self._player, 'media_title') or getattr(self._player, 'filename'),
            'url': self._get_current_resource(),
            'video_codec': getattr(self._player, 'video_codec'),
            'video_format': getattr(self._player, 'video_format'),
            'volume': getattr(self._player, 'volume'),
            'volume_max': getattr(self._player, 'volume_max'),
            'width': getattr(self._player, 'width'),
        }

    def on_stop(self, callback):
        """Register a callback to be invoked when playback stops."""
        self._on_stop_callbacks.append(callback)

    def _get_current_resource(self):
        """Return the currently playing resource URL/path, if any."""
        if not self._player or not self._player.stream_path:
            return

        return ('file://' if os.path.isfile(self._player.stream_path)
                else '') + self._player.stream_path

    def _get_resource(self, resource):
        """Resolve a resource, letting mpv stream YouTube URLs natively."""
        if self._is_youtube_resource(resource):
            return resource  # mpv can handle YouTube streaming natively

        return super()._get_resource(resource)
# vim:sw=4:ts=4:et:
| 32.735981 | 117 | 0.593177 | import os
import threading
from platypush.context import get_bus
from platypush.plugins.media import PlayerState, MediaPlugin
from platypush.message.event.media import MediaPlayEvent, MediaPlayRequestEvent, \
MediaPauseEvent, MediaStopEvent, NewPlayingMediaEvent, MediaSeekEvent
from platypush.plugins import action
class MediaMpvPlugin(MediaPlugin):
    """
    Plugin to control MPV instances.

    Requires:

        * **python-mpv** (``pip install python-mpv``)
        * **mpv** executable on your system
    """

    # Options always passed to mpv unless overridden per instance/call.
    _default_mpv_args = {
        'ytdl': True,
        'start_event_thread': True,
    }

    def __init__(self, args=None, *argv, **kwargs):
        """
        Create the MPV wrapper.

        :param args: Default arguments that will be passed to the mpv executable
            as a key-value dict (names without the `--` prefix). See `man mpv`
            for available options.
        :type args: dict[str, str]
        """
        super().__init__(*argv, **kwargs)

        # Copy the defaults: updating the shared class-level dict in place
        # would leak per-instance options to every other instance.
        self.args = self._default_mpv_args.copy()
        if args:
            self.args.update(args)

        self._player = None
        self._playback_rebounce_event = threading.Event()
        self._on_stop_callbacks = []

    def _init_mpv(self, args=None):
        """
        Spawn the mpv player instance.

        :param args: Extra mpv options for this instance, merged over the
            configured defaults.
        """
        import mpv

        mpv_args = self.args.copy()
        if args:
            mpv_args.update(args)

        # Propagate the plugin's configured environment to the mpv process.
        for k, v in self._env.items():
            os.environ[k] = v

        self._player = mpv.MPV(**mpv_args)
        self._player._event_callbacks += [self._event_callback()]

    @staticmethod
    def _post_event(evt_type, **evt):
        """Post a media event of the given type on the application bus."""
        bus = get_bus()
        bus.post(evt_type(player='local', plugin='media.mpv', **evt))

    def _event_callback(self):
        """
        Return the callback that translates mpv player events into
        platypush media events.
        """
        def callback(event):
            from mpv import MpvEventID as Event
            from mpv import MpvEventEndFile as EndFile

            self.logger.info('Received mpv event: {}'.format(event))
            evt = event.get('event_id')
            if not evt:
                return

            if (evt == Event.FILE_LOADED or evt == Event.START_FILE) and self._get_current_resource():
                self._playback_rebounce_event.set()
                self._post_event(NewPlayingMediaEvent, resource=self._get_current_resource(),
                                 title=self._player.filename)
            elif evt == Event.PLAYBACK_RESTART:
                self._playback_rebounce_event.set()
            elif evt == Event.PAUSE:
                self._post_event(MediaPauseEvent, resource=self._get_current_resource(), title=self._player.filename)
            elif evt == Event.UNPAUSE:
                self._post_event(MediaPlayEvent, resource=self._get_current_resource(), title=self._player.filename)
            elif evt == Event.SHUTDOWN or (
                    evt == Event.END_FILE and event.get('event', {}).get('reason') in
                    [EndFile.EOF_OR_INIT_FAILURE, EndFile.ABORTED, EndFile.QUIT]):
                # An END_FILE immediately followed by a PLAYBACK_RESTART is
                # a seek/reload "rebounce", not an actual stop.
                playback_rebounced = self._playback_rebounce_event.wait(timeout=0.5)
                if playback_rebounced:
                    self._playback_rebounce_event.clear()
                    return

                self._player = None
                self._post_event(MediaStopEvent)

                for cbk in self._on_stop_callbacks:
                    cbk()
            elif evt == Event.SEEK:
                self._post_event(MediaSeekEvent, position=self._player.playback_time)

        return callback

    @action
    def execute(self, cmd, **args):
        """
        Execute a raw mpv command.
        """
        if not self._player:
            return None, 'No mpv instance is running'

        return self._player.command(cmd, *args)

    @action
    def play(self, resource, subtitles=None, **args):
        """
        Play a resource.

        :param resource: Resource to play - can be a local file or a remote URL
        :type resource: str

        :param subtitles: Path to optional subtitle file
        :type subtitles: str

        :param args: Extra runtime arguments that will be passed to the
            mpv executable as a key-value dict (keys without `--` prefix)
        :type args: dict[str,str]
        """
        get_bus().post(MediaPlayRequestEvent(resource=resource))

        # Resolve the subtitles *before* spawning the player: previously
        # ``sub_file`` was added to ``args`` after ``_init_mpv`` had already
        # consumed them, so the subtitles never reached mpv.
        if subtitles:
            args['sub_file'] = self.get_subtitles_file(subtitles)

        self._init_mpv(args)

        resource = self._get_resource(resource)
        if resource.startswith('file://'):
            resource = resource[7:]

        assert self._player, 'The player is not ready'
        self._player.play(resource)

        if self.volume:
            self.set_volume(volume=self.volume)

        return self.status()

    @action
    def pause(self):
        """ Toggle the paused state """
        if not self._player:
            return None, 'No mpv instance is running'

        self._player.pause = not self._player.pause
        return self.status()

    @action
    def quit(self):
        """ Stop and quit the player """
        self._stop_torrent()
        if not self._player:
            return None, 'No mpv instance is running'

        self._player.quit()
        self._player.terminate()
        self._player = None
        return {'state': PlayerState.STOP.value}

    @action
    def stop(self):
        """ Stop and quit the player (alias for :meth:`.quit`) """
        return self.quit()

    @action
    def voldown(self, step=10.0):
        """ Volume down by (default: 10)% """
        if not self._player:
            return None, 'No mpv instance is running'
        return self.set_volume(self._player.volume - step)

    @action
    def volup(self, step=10.0):
        """ Volume up by (default: 10)% """
        if not self._player:
            return None, 'No mpv instance is running'
        return self.set_volume(self._player.volume + step)

    @action
    def set_volume(self, volume):
        """
        Set the volume.

        :param volume: Volume value between 0 and 100
        :type volume: float
        """
        if not self._player:
            return None, 'No mpv instance is running'

        # Clamp the requested value to [0, volume_max].
        volume = max(0, min([self._player.volume_max, volume]))
        self._player.volume = volume
        return self.status()

    @action
    def seek(self, position):
        """
        Seek to the given position, expressed in seconds from the beginning
        of the media (clamped between 0 and the media duration).

        :param position: Target position in seconds
        :type position: int
        """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = min(self._player.time_pos + self._player.time_remaining,
                  max(0, position))
        self._player.time_pos = pos
        return self.status()

    @action
    def back(self, offset=60.0):
        """ Back by (default: 60) seconds """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = max(0, self._player.time_pos - offset)
        return self.seek(pos)

    @action
    def forward(self, offset=60.0):
        """ Forward by (default: 60) seconds """
        if not self._player:
            return None, 'No mpv instance is running'
        if not self._player.seekable:
            return None, 'The resource is not seekable'

        pos = min(self._player.time_pos + self._player.time_remaining,
                  self._player.time_pos + offset)
        return self.seek(pos)

    @action
    def next(self):
        """ Play the next item in the queue """
        if not self._player:
            return None, 'No mpv instance is running'
        self._player.playlist_next()

    @action
    def prev(self):
        """ Play the previous item in the queue """
        if not self._player:
            return None, 'No mpv instance is running'
        self._player.playlist_prev()

    @action
    def toggle_subtitles(self, visible=None):
        """
        Toggle the subtitles visibility.

        :param visible: Currently unused, kept for interface compatibility;
            the visibility is always toggled regardless of its value.
        """
        return self.toggle_property('sub_visibility')

    @action
    def add_subtitles(self, filename):
        """
        Add a subtitles file.

        :param filename: Path of the subtitles file
        """
        if not self._player:
            return None, 'No mpv instance is running'
        return self._player.sub_add(filename)

    @action
    def remove_subtitles(self, sub_id=None):
        """
        Hide the subtitles, or remove a subtitles track.

        The original implementation declared ``remove_subtitles`` twice
        (once taking a track id, once merely hiding the subtitles); the
        second definition silently shadowed the first. Both behaviours are
        merged here.

        :param sub_id: Optional id of the subtitles track to remove. If not
            specified, the subtitles are simply hidden.
        """
        if not self._player:
            return None, 'No mpv instance is running'
        if sub_id is not None:
            return self._player.sub_remove(sub_id)
        self._player.sub_visibility = False

    @action
    def toggle_fullscreen(self):
        """ Toggle the fullscreen mode """
        return self.toggle_property('fullscreen')

    @action
    def toggle_property(self, property):
        """
        Toggle or sets the value of an mpv property (e.g. fullscreen,
        sub_visibility etc.). See ``man mpv`` for a full list of properties

        :param property: Property to toggle
        """
        if not self._player:
            return None, 'No mpv instance is running'

        if not hasattr(self._player, property):
            self.logger.warning('No such mpv property: {}'.format(property))

        value = not getattr(self._player, property)
        setattr(self._player, property, value)
        return {property: value}

    @action
    def get_property(self, property):
        """
        Get a player property (e.g. pause, fullscreen etc.). See
        ``man mpv`` for a full list of the available properties
        """
        if not self._player:
            return None, 'No mpv instance is running'
        return getattr(self._player, property)

    @action
    def set_property(self, **props):
        """
        Set the value of an mpv property (e.g. fullscreen, sub_visibility
        etc.). See ``man mpv`` for a full list of properties

        :param props: Key-value args for the properties to set
        :type props: dict
        """
        if not self._player:
            return None, 'No mpv instance is running'

        for k, v in props.items():
            setattr(self._player, k, v)
        return props

    @action
    def set_subtitles(self, filename, *args, **kwargs):
        """ Sets media subtitles from filename """
        # NOTE(review): mpv's option for an extra subtitles file is
        # ``sub-file``; confirm that ``subfile`` is honoured by the
        # installed python-mpv version.
        return self.set_property(subfile=filename, sub_visibility=True)

    @action
    def is_playing(self):
        """
        :returns: True if it's playing, False otherwise
        """
        if not self._player:
            return False
        return not self._player.pause

    @action
    def load(self, resource, **args):
        """
        Load/queue a resource/video to the player
        """
        if not self._player:
            return self.play(resource, **args)
        return self._player.loadfile(resource, mode='append-play')

    @action
    def mute(self):
        """ Toggle mute state """
        if not self._player:
            return None, 'No mpv instance is running'
        mute = not self._player.mute
        self._player.mute = mute
        return {'muted': mute}

    @action
    def set_position(self, position):
        """
        Seek to the specified absolute position (same as ``seek``)
        """
        return self.seek(position)

    @action
    def status(self):
        """
        Get the current player state.

        :returns: A dictionary containing the current state.

        Example::

            output = {
                "filename": "filename or stream URL",
                "state": "play"  # or "stop" or "pause"
            }
        """
        if not self._player or not hasattr(self._player, 'pause'):
            return {'state': PlayerState.STOP.value}

        return {
            'audio_channels': getattr(self._player, 'audio_channels'),
            'audio_codec': getattr(self._player, 'audio_codec_name'),
            'delay': getattr(self._player, 'delay'),
            'duration': getattr(self._player, 'playback_time', 0) + getattr(self._player, 'playtime_remaining', 0)
            if getattr(self._player, 'playtime_remaining') else None,
            'filename': getattr(self._player, 'filename'),
            'file_size': getattr(self._player, 'file_size'),
            'fullscreen': getattr(self._player, 'fs'),
            'mute': getattr(self._player, 'mute'),
            'name': getattr(self._player, 'name'),
            'pause': getattr(self._player, 'pause'),
            'percent_pos': getattr(self._player, 'percent_pos'),
            'position': getattr(self._player, 'playback_time'),
            'seekable': getattr(self._player, 'seekable'),
            'state': (PlayerState.PAUSE.value if self._player.pause else PlayerState.PLAY.value),
            'title': getattr(self._player, 'media_title') or getattr(self._player, 'filename'),
            'url': self._get_current_resource(),
            'video_codec': getattr(self._player, 'video_codec'),
            'video_format': getattr(self._player, 'video_format'),
            'volume': getattr(self._player, 'volume'),
            'volume_max': getattr(self._player, 'volume_max'),
            'width': getattr(self._player, 'width'),
        }

    def on_stop(self, callback):
        """Register a callback to be invoked when playback stops."""
        self._on_stop_callbacks.append(callback)

    def _get_current_resource(self):
        """Return the currently playing resource URL/path, if any."""
        if not self._player or not self._player.stream_path:
            return

        return ('file://' if os.path.isfile(self._player.stream_path)
                else '') + self._player.stream_path

    def _get_resource(self, resource):
        """Resolve a resource, letting mpv stream YouTube URLs natively."""
        if self._is_youtube_resource(resource):
            return resource

        return super()._get_resource(resource)
| true | true |
f7339296a259556a6062ae990caf0bcc72efd96e | 1,061 | py | Python | cride/users/models/profiles.py | ChekeGT/Comparte-Ride | cb30f1cb6cdafe81fd61ff7539ecaa39f3751353 | [
"MIT"
] | 1 | 2019-09-26T22:49:51.000Z | 2019-09-26T22:49:51.000Z | cride/users/models/profiles.py | ChekeGT/Comparte-Ride | cb30f1cb6cdafe81fd61ff7539ecaa39f3751353 | [
"MIT"
] | 3 | 2021-06-08T22:54:10.000Z | 2022-01-13T03:33:36.000Z | cride/users/models/profiles.py | ChekeGT/Comparte-Ride | cb30f1cb6cdafe81fd61ff7539ecaa39f3751353 | [
"MIT"
] | null | null | null | """Profile model and related models declaration."""
# Django
from django.db import models
# Models
from cride.utils.models import CRideModel
from cride.users.models import User
class Profile(CRideModel):
    """Public profile attached one-to-one to a ``User``.

    It's a proxy model to the user, but its difference is that this one
    holds public data — the things other users may see (picture,
    biography) — together with ride statistics, keeping the ``User``
    model itself limited to account data.
    """

    # Owning user; deleting the user cascades and removes the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)

    # Optional avatar stored under MEDIA_ROOT/users/pictures/.
    picture = models.ImageField(
        upload_to='users/pictures/',
        blank=True,
        null=True
    )

    biography = models.TextField(
        max_length=500,
        blank=True
    )

    # Statistics
    rides_taken = models.PositiveIntegerField(default=0)
    rides_offered = models.PositiveIntegerField(default=0)
    reputation = models.FloatField(
        default=5.0,
        help_text="User reputation based on the rides that he has taken or offered."
    )

    def __str__(self):
        """Returns the owning user's str representation."""
        return str(self.user)
| 23.065217 | 84 | 0.67295 |
from django.db import models
from cride.utils.models import CRideModel
from cride.users.models import User
class Profile(CRideModel):
user = models.OneToOneField(User, on_delete=models.CASCADE)
picture = models.ImageField(
upload_to='users/pictures/',
blank=True,
null=True
)
biography = models.TextField(
max_length=500,
blank=True
)
rides_taken = models.PositiveIntegerField(default=0)
rides_offered = models.PositiveIntegerField(default=0)
reputation = models.FloatField(
default=5.0,
help_text="User reputation based on the rides that he has taken or offered."
)
def __str__(self):
return str(self.user)
| true | true |
f7339354144b687c585f31b180384991e55d7608 | 1,270 | py | Python | configs/classification/matching_net/mini_imagenet/matching-net_resnet12_1xb105_mini-imagenet_5way-5shot.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 376 | 2021-11-23T13:29:57.000Z | 2022-03-30T07:22:14.000Z | configs/classification/matching_net/mini_imagenet/matching-net_resnet12_1xb105_mini-imagenet_5way-5shot.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 51 | 2021-11-23T14:45:08.000Z | 2022-03-30T03:37:15.000Z | configs/classification/matching_net/mini_imagenet/matching-net_resnet12_1xb105_mini-imagenet_5way-5shot.py | BIGWangYuDong/mmfewshot | dac097afc92df176bc2de76b7c90968584865197 | [
"Apache-2.0"
] | 56 | 2021-11-23T14:02:27.000Z | 2022-03-31T09:01:50.000Z | _base_ = [
'../../_base_/meta_test/mini-imagenet_meta-test_5way-5shot.py',
'../../_base_/runtime/iter_based_runtime.py',
'../../_base_/schedules/adam_100k_iter.py'
]
img_size = 84
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', size=img_size),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label'])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=8,
train=dict(
type='EpisodicDataset',
num_episodes=100000,
num_ways=5,
num_shots=5,
num_queries=16,
dataset=dict(
type='MiniImageNetDataset',
data_prefix='data/mini_imagenet',
subset='train',
pipeline=train_pipeline)),
test=dict(meta_test_cfg=dict(fast_test=True)))
model = dict(
type='MatchingNet',
backbone=dict(type='ResNet12'),
head=dict(type='MatchingHead'))
| 30.97561 | 77 | 0.634646 | _base_ = [
'../../_base_/meta_test/mini-imagenet_meta-test_5way-5shot.py',
'../../_base_/runtime/iter_based_runtime.py',
'../../_base_/schedules/adam_100k_iter.py'
]
img_size = 84
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='RandomResizedCrop', size=img_size),
dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label'])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=8,
train=dict(
type='EpisodicDataset',
num_episodes=100000,
num_ways=5,
num_shots=5,
num_queries=16,
dataset=dict(
type='MiniImageNetDataset',
data_prefix='data/mini_imagenet',
subset='train',
pipeline=train_pipeline)),
test=dict(meta_test_cfg=dict(fast_test=True)))
model = dict(
type='MatchingNet',
backbone=dict(type='ResNet12'),
head=dict(type='MatchingHead'))
| true | true |
f733935b6223c301bbf13251c4a9f50ffb38b622 | 9,362 | py | Python | numba/cuda/kernels/reduction.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 6,620 | 2015-01-04T08:51:04.000Z | 2022-03-31T12:52:18.000Z | numba/cuda/kernels/reduction.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 6,457 | 2015-01-04T03:18:41.000Z | 2022-03-31T17:38:42.000Z | numba/cuda/kernels/reduction.py | auderson/numba | 3d67c9850ab56457f418cf40af6245fd9c337705 | [
"BSD-2-Clause"
] | 930 | 2015-01-25T02:33:03.000Z | 2022-03-30T14:10:32.000Z | """
A library written in CUDA Python for generating reduction kernels
"""
from numba.np.numpy_support import from_dtype
_WARPSIZE = 32
_NUMWARPS = 4
def _gpu_reduce_factory(fn, nbtype):
    """Build and JIT-compile a CUDA reduction kernel around ``fn``.

    :param fn: binary reduction operator ``fn(a, b)``; compiled into a
               CUDA device function.
    :param nbtype: Numba element type of the array being reduced; sizes
                   the shared-memory scratch buffers.
    :return: a ``cuda.jit``-compiled kernel with launch signature
             ``kernel[blocks, threads, stream](arr, partials, init,
             use_init)``.
    """
    from numba import cuda

    reduce_op = cuda.jit(device=True)(fn)
    inner_sm_size = _WARPSIZE + 1   # plus one to avoid SM collision
    max_blocksize = _NUMWARPS * _WARPSIZE

    @cuda.jit(device=True)
    def inner_warp_reduction(sm_partials, init):
        """
        Compute reduction within a single warp
        """
        tid = cuda.threadIdx.x
        warpid = tid // _WARPSIZE
        laneid = tid % _WARPSIZE

        sm_this = sm_partials[warpid, :]
        sm_this[laneid] = init
        cuda.syncwarp()

        # Pairwise tree reduction: halve the active lanes each step until
        # the warp's value sits in lane 0 of its shared-memory row.
        width = _WARPSIZE // 2
        while width:
            if laneid < width:
                old = sm_this[laneid]
                sm_this[laneid] = reduce_op(old, sm_this[laneid + width])
            cuda.syncwarp()
            width //= 2

    @cuda.jit(device=True)
    def device_reduce_full_block(arr, partials, sm_partials):
        """
        Partially reduce `arr` into `partials` using `sm_partials` as working
        space. The algorithm goes like:

            array chunks of 128:  | 0 | 128 | 256 | 384 | 512 |
                        block-0:  | x |     |     |  x  |     |
                        block-1:  |   |  x  |     |     |  x  |
                        block-2:  |   |     |  x  |     |     |

        The array is divided into chunks of 128 (size of a threadblock).
        The threadblocks consumes the chunks in roundrobin scheduling.
        First, a threadblock loads a chunk into temp memory. Then, all
        subsequent chunks are combined into the temp memory.

        Once all chunks are processed. Inner-block reduction is performed
        on the temp memory. So that, there will just be one scalar result
        per block. The result from each block is stored to `partials` at
        the dedicated slot.
        """
        tid = cuda.threadIdx.x
        blkid = cuda.blockIdx.x
        blksz = cuda.blockDim.x
        gridsz = cuda.gridDim.x

        # block strided loop to compute the reduction
        start = tid + blksz * blkid
        stop = arr.size
        step = blksz * gridsz

        # load first value
        tmp = arr[start]
        # loop over all values in block-stride
        for i in range(start + step, stop, step):
            tmp = reduce_op(tmp, arr[i])

        cuda.syncthreads()
        # inner-warp reduction
        inner_warp_reduction(sm_partials, tmp)

        cuda.syncthreads()
        # at this point, only the first slot for each warp in tsm_partials
        # is valid.

        # finish up block reduction
        # warning: this is assuming 4 warps.
        # assert numwarps == 4
        if tid < 2:
            sm_partials[tid, 0] = reduce_op(sm_partials[tid, 0],
                                            sm_partials[tid + 2, 0])
        cuda.syncwarp()
        if tid == 0:
            partials[blkid] = reduce_op(sm_partials[0, 0], sm_partials[1, 0])

    @cuda.jit(device=True)
    def device_reduce_partial_block(arr, partials, sm_partials):
        """
        This computes reduction on `arr`.
        This device function must be used by 1 threadblock only.
        The blocksize must match `arr.size` and must not be greater than 128.
        """
        tid = cuda.threadIdx.x
        blkid = cuda.blockIdx.x
        blksz = cuda.blockDim.x
        warpid = tid // _WARPSIZE
        laneid = tid % _WARPSIZE

        size = arr.size
        # load first value
        tid = cuda.threadIdx.x
        value = arr[tid]
        sm_partials[warpid, laneid] = value

        cuda.syncthreads()

        if (warpid + 1) * _WARPSIZE < size:
            # fully populated warps
            inner_warp_reduction(sm_partials, value)
        else:
            # partially populated warps
            # NOTE: this uses a very inefficient sequential algorithm
            if laneid == 0:
                sm_this = sm_partials[warpid, :]
                base = warpid * _WARPSIZE
                for i in range(1, size - base):
                    sm_this[0] = reduce_op(sm_this[0], sm_this[i])

        cuda.syncthreads()
        # finish up: thread 0 folds the per-warp results sequentially.
        if tid == 0:
            num_active_warps = (blksz + _WARPSIZE - 1) // _WARPSIZE

            result = sm_partials[0, 0]
            for i in range(1, num_active_warps):
                result = reduce_op(result, sm_partials[i, 0])

            partials[blkid] = result

    def gpu_reduce_block_strided(arr, partials, init, use_init):
        """
        Perform reductions on *arr* and writing out partial reduction result
        into *partials*. The length of *partials* is determined by the
        number of threadblocks. The initial value is set with *init*.

        Launch config:

        Blocksize must be multiple of warpsize and it is limited to 4 warps.
        """
        tid = cuda.threadIdx.x

        sm_partials = cuda.shared.array((_NUMWARPS, inner_sm_size),
                                        dtype=nbtype)
        if cuda.blockDim.x == max_blocksize:
            device_reduce_full_block(arr, partials, sm_partials)
        else:
            device_reduce_partial_block(arr, partials, sm_partials)

        # deal with the initializer: fold `init` in exactly once, on the
        # first thread of the first block.
        if use_init and tid == 0 and cuda.blockIdx.x == 0:
            partials[0] = reduce_op(partials[0], init)

    return cuda.jit(gpu_reduce_block_strided)
class Reduce(object):
    """Create a reduction object that reduces values using a given binary
    function. The binary function is compiled once and cached inside this
    object. Keeping this object alive will prevent re-compilation.
    """

    # Class-level cache shared by all instances:
    # (functor, dtype) -> compiled kernel.
    _cache = {}

    def __init__(self, functor):
        """
        :param functor: A function implementing a binary operation for
                        reduction. It will be compiled as a CUDA device
                        function using ``cuda.jit(device=True)``.
        """
        self._functor = functor

    def _compile(self, dtype):
        # Fetch (or compile and memoize) the kernel specialized for dtype.
        key = self._functor, dtype
        if key in self._cache:
            kernel = self._cache[key]
        else:
            kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype))
            self._cache[key] = kernel
        return kernel

    def __call__(self, arr, size=None, res=None, init=0, stream=0):
        """Performs a full reduction.

        :param arr: A host or device array.
        :param size: Optional integer specifying the number of elements in
            ``arr`` to reduce. If this parameter is not specified, the
            entire array is reduced.
        :param res: Optional device array into which to write the reduction
            result to. The result is written into the first element of
            this array. If this parameter is specified, then no
            communication of the reduction output takes place from the
            device to the host.
        :param init: Optional initial value for the reduction, the type of which
            must match ``arr.dtype``.
        :param stream: Optional CUDA stream in which to perform the reduction.
            If no stream is specified, the default stream of 0 is
            used.
        :return: If ``res`` is specified, ``None`` is returned. Otherwise, the
            result of the reduction is returned.
        """
        from numba import cuda

        # ensure 1d array
        if arr.ndim != 1:
            raise TypeError("only support 1D array")

        # adjust array size
        if size is not None:
            arr = arr[:size]

        init = arr.dtype.type(init)  # ensure the right type

        # return `init` if `arr` is empty
        if arr.size < 1:
            return init

        kernel = self._compile(arr.dtype)

        # Perform the reduction on the GPU
        blocksize = _NUMWARPS * _WARPSIZE
        size_full = (arr.size // blocksize) * blocksize
        size_partial = arr.size - size_full
        full_blockct = min(size_full // blocksize, _WARPSIZE * 2)

        # allocate size of partials array
        partials_size = full_blockct
        if size_partial:
            # one extra slot for the trailing partial block's result
            partials_size += 1
        partials = cuda.device_array(shape=partials_size, dtype=arr.dtype)

        if size_full:
            # kernel for the fully populated threadblocks
            kernel[full_blockct, blocksize, stream](arr[:size_full],
                                                    partials[:full_blockct],
                                                    init,
                                                    True)

        if size_partial:
            # kernel for partially populated threadblocks
            kernel[1, size_partial, stream](arr[size_full:],
                                            partials[full_blockct:],
                                            init,
                                            not full_blockct)

        if partials.size > 1:
            # finish up: reduce the per-block partials to a single scalar
            kernel[1, partials_size, stream](partials, partials, init, False)

        # handle return value
        if res is not None:
            # device-to-device copy into the caller's buffer; no host sync
            res[:1].copy_to_device(partials[:1], stream=stream)
            return
        else:
            return partials[0]
| 35.596958 | 80 | 0.561739 |
from numba.np.numpy_support import from_dtype
_WARPSIZE = 32
_NUMWARPS = 4
def _gpu_reduce_factory(fn, nbtype):
from numba import cuda
reduce_op = cuda.jit(device=True)(fn)
inner_sm_size = _WARPSIZE + 1
max_blocksize = _NUMWARPS * _WARPSIZE
@cuda.jit(device=True)
def inner_warp_reduction(sm_partials, init):
tid = cuda.threadIdx.x
warpid = tid // _WARPSIZE
laneid = tid % _WARPSIZE
sm_this = sm_partials[warpid, :]
sm_this[laneid] = init
cuda.syncwarp()
width = _WARPSIZE // 2
while width:
if laneid < width:
old = sm_this[laneid]
sm_this[laneid] = reduce_op(old, sm_this[laneid + width])
cuda.syncwarp()
width //= 2
@cuda.jit(device=True)
def device_reduce_full_block(arr, partials, sm_partials):
tid = cuda.threadIdx.x
blkid = cuda.blockIdx.x
blksz = cuda.blockDim.x
gridsz = cuda.gridDim.x
start = tid + blksz * blkid
stop = arr.size
step = blksz * gridsz
tmp = arr[start]
for i in range(start + step, stop, step):
tmp = reduce_op(tmp, arr[i])
cuda.syncthreads()
inner_warp_reduction(sm_partials, tmp)
cuda.syncthreads()
if tid < 2:
sm_partials[tid, 0] = reduce_op(sm_partials[tid, 0],
sm_partials[tid + 2, 0])
cuda.syncwarp()
if tid == 0:
partials[blkid] = reduce_op(sm_partials[0, 0], sm_partials[1, 0])
@cuda.jit(device=True)
def device_reduce_partial_block(arr, partials, sm_partials):
tid = cuda.threadIdx.x
blkid = cuda.blockIdx.x
blksz = cuda.blockDim.x
warpid = tid // _WARPSIZE
laneid = tid % _WARPSIZE
size = arr.size
tid = cuda.threadIdx.x
value = arr[tid]
sm_partials[warpid, laneid] = value
cuda.syncthreads()
if (warpid + 1) * _WARPSIZE < size:
inner_warp_reduction(sm_partials, value)
else:
if laneid == 0:
sm_this = sm_partials[warpid, :]
base = warpid * _WARPSIZE
for i in range(1, size - base):
sm_this[0] = reduce_op(sm_this[0], sm_this[i])
cuda.syncthreads()
if tid == 0:
num_active_warps = (blksz + _WARPSIZE - 1) // _WARPSIZE
result = sm_partials[0, 0]
for i in range(1, num_active_warps):
result = reduce_op(result, sm_partials[i, 0])
partials[blkid] = result
def gpu_reduce_block_strided(arr, partials, init, use_init):
tid = cuda.threadIdx.x
sm_partials = cuda.shared.array((_NUMWARPS, inner_sm_size),
dtype=nbtype)
if cuda.blockDim.x == max_blocksize:
device_reduce_full_block(arr, partials, sm_partials)
else:
device_reduce_partial_block(arr, partials, sm_partials)
if use_init and tid == 0 and cuda.blockIdx.x == 0:
partials[0] = reduce_op(partials[0], init)
return cuda.jit(gpu_reduce_block_strided)
class Reduce(object):
_cache = {}
def __init__(self, functor):
self._functor = functor
def _compile(self, dtype):
key = self._functor, dtype
if key in self._cache:
kernel = self._cache[key]
else:
kernel = _gpu_reduce_factory(self._functor, from_dtype(dtype))
self._cache[key] = kernel
return kernel
def __call__(self, arr, size=None, res=None, init=0, stream=0):
from numba import cuda
if arr.ndim != 1:
raise TypeError("only support 1D array")
if size is not None:
arr = arr[:size]
init = arr.dtype.type(init)
if arr.size < 1:
return init
kernel = self._compile(arr.dtype)
blocksize = _NUMWARPS * _WARPSIZE
size_full = (arr.size // blocksize) * blocksize
size_partial = arr.size - size_full
full_blockct = min(size_full // blocksize, _WARPSIZE * 2)
partials_size = full_blockct
if size_partial:
partials_size += 1
partials = cuda.device_array(shape=partials_size, dtype=arr.dtype)
if size_full:
kernel[full_blockct, blocksize, stream](arr[:size_full],
partials[:full_blockct],
init,
True)
if size_partial:
kernel[1, size_partial, stream](arr[size_full:],
partials[full_blockct:],
init,
not full_blockct)
if partials.size > 1:
kernel[1, partials_size, stream](partials, partials, init, False)
if res is not None:
res[:1].copy_to_device(partials[:1], stream=stream)
return
else:
return partials[0]
| true | true |
f7339427a053d8f4b9965edf88dc8405e5ffbbd3 | 9,358 | py | Python | pkg/pkg/stats/fisher_exact_nonunity.py | dlee0156/bilateral-connectome | 26fe165341bb79379fecdd8bc5d7b5bfe3983fdc | [
"MIT"
] | null | null | null | pkg/pkg/stats/fisher_exact_nonunity.py | dlee0156/bilateral-connectome | 26fe165341bb79379fecdd8bc5d7b5bfe3983fdc | [
"MIT"
] | null | null | null | pkg/pkg/stats/fisher_exact_nonunity.py | dlee0156/bilateral-connectome | 26fe165341bb79379fecdd8bc5d7b5bfe3983fdc | [
"MIT"
] | null | null | null | from scipy.stats import nchypergeom_fisher
import numpy as np
def fisher_exact_nonunity(table, alternative="two-sided", null_odds=1):
    """Fisher exact test on a 2x2 table with a possibly non-unity null.

    This is a variant of :func:`scipy.stats.fisher_exact` in which the
    null hypothesis is that the odds ratio equals ``null_odds`` rather
    than 1.  The table is therefore modeled with Fisher's noncentral
    hypergeometric distribution (``scipy.stats.nchypergeom_fisher``)
    instead of the central hypergeometric.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements must be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided').
    null_odds : float, optional (default=1)
        The odds ratio assumed under the null hypothesis.

    Returns
    -------
    oddsratio : float
        The sample (unconditional MLE) odds ratio ``(a*d)/(b*c)``;
        ``inf`` when ``b`` or ``c`` is zero, ``nan`` when a whole row or
        column is zero.  This is the prior odds ratio and not a
        posterior estimate.
    p_value : float
        The probability, under the noncentral null, of obtaining a table
        at least as extreme as the one observed.
    """
    dist = nchypergeom_fisher

    # int32 is not enough for the algorithm
    c = np.asarray(table, dtype=np.int64)
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0

    if c[1, 0] > 0 and c[0, 1] > 0:
        oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
    else:
        oddsratio = np.inf

    # Margins: n1/n2 are the row sums, n is the first column sum.
    n1 = c[0, 0] + c[0, 1]
    n2 = c[1, 0] + c[1, 1]
    n = c[0, 0] + c[1, 0]

    # Null distribution of the upper-left cell, with odds `null_odds`.
    rv = dist(n1 + n2, n1, n, null_odds)

    def binary_search(n, n1, n2, side):
        """Binary search for where to begin halves in two-sided test."""
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = rv.pmf(guess)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            if pguess <= pexact < rv.pmf(ng):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Refine the boundary: step until pmf(guess) straddles pexact
        # within the epsilon tolerance (guards against float noise).
        if side == "upper":
            while guess > 0 and rv.pmf(guess) < pexact * epsilon:
                guess -= 1
            while rv.pmf(guess) > pexact / epsilon:
                guess += 1
        else:
            while rv.pmf(guess) < pexact * epsilon:
                guess += 1
            while guess > 0 and rv.pmf(guess) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == "less":
        pvalue = rv.cdf(c[0, 0])
    elif alternative == "greater":
        # Same formula as the 'less' case, but with the second column.
        pvalue = rv.sf(c[0, 0] - 1)
    elif alternative == "two-sided":
        # NOTE(review): this is the mode formula of the *central*
        # hypergeometric; for null_odds far from 1 the true mode of
        # nchypergeom_fisher may differ — confirm against scipy.
        mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
        pexact = dist.pmf(c[0, 0], n1 + n2, n1, n, null_odds)
        pmode = dist.pmf(mode, n1 + n2, n1, n, null_odds)

        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            return oddsratio, 1.0

        elif c[0, 0] < mode:
            # Observed cell below the mode: lower tail plus the matching
            # upper tail found via binary search.
            plower = dist.cdf(c[0, 0], n1 + n2, n1, n, null_odds)
            if dist.pmf(n, n1 + n2, n1, n, null_odds) > pexact / epsilon:
                return oddsratio, plower

            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + dist.sf(guess - 1, n1 + n2, n1, n, null_odds)
        else:
            # Observed cell at/above the mode: mirror-image of the above.
            pupper = dist.sf(c[0, 0] - 1, n1 + n2, n1, n, null_odds)
            if dist.pmf(0, n1 + n2, n1, n, null_odds) > pexact / epsilon:
                return oddsratio, pupper

            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + dist.cdf(guess, n1 + n2, n1, n, null_odds)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)

    # Numerical noise can push the two-tail sum slightly above 1.
    pvalue = min(pvalue, 1.0)
    return oddsratio, pvalue
| 41.22467 | 80 | 0.565292 | from scipy.stats import nchypergeom_fisher
import numpy as np
def fisher_exact_nonunity(table, alternative="two-sided", null_odds=1):
dist = nchypergeom_fisher
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
rv = dist(n1 + n2, n1, n, null_odds)
def binary_search(n, n1, n2, side):
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = rv.pmf(guess)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < rv.pmf(ng):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and rv.pmf(guess) < pexact * epsilon:
guess -= 1
while rv.pmf(guess) > pexact / epsilon:
guess += 1
else:
while rv.pmf(guess) < pexact * epsilon:
guess += 1
while guess > 0 and rv.pmf(guess) > pexact / epsilon:
guess -= 1
return guess
if alternative == "less":
pvalue = rv.cdf(c[0, 0])
elif alternative == "greater":
pvalue = rv.sf(c[0, 0] - 1)
elif alternative == "two-sided":
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = dist.pmf(c[0, 0], n1 + n2, n1, n, null_odds)
pmode = dist.pmf(mode, n1 + n2, n1, n, null_odds)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.0
elif c[0, 0] < mode:
plower = dist.cdf(c[0, 0], n1 + n2, n1, n, null_odds)
if dist.pmf(n, n1 + n2, n1, n, null_odds) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + dist.sf(guess - 1, n1 + n2, n1, n, null_odds)
else:
pupper = dist.sf(c[0, 0] - 1, n1 + n2, n1, n, null_odds)
if dist.pmf(0, n1 + n2, n1, n, null_odds) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + dist.cdf(guess, n1 + n2, n1, n, null_odds)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
| true | true |
f733950fa6f5f5f02c90aca71819e98db7ea1158 | 27,241 | py | Python | sanic/request.py | aericson/sanic | 4a416e177aa5037ba9436e53f531631707e87ea7 | [
"MIT"
] | null | null | null | sanic/request.py | aericson/sanic | 4a416e177aa5037ba9436e53f531631707e87ea7 | [
"MIT"
] | null | null | null | sanic/request.py | aericson/sanic | 4a416e177aa5037ba9436e53f531631707e87ea7 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
from sanic_routing.route import Route # type: ignore
from sanic.models.http_types import Credentials
if TYPE_CHECKING: # no cov
from sanic.server import ConnInfo
from sanic.app import Sanic
import email.utils
import uuid
from collections import defaultdict
from http.cookies import SimpleCookie
from types import SimpleNamespace
from urllib.parse import parse_qs, parse_qsl, unquote, urlunparse
from httptools import parse_url # type: ignore
from sanic.compat import CancelledErrors, Header
from sanic.constants import DEFAULT_HTTP_CONTENT_TYPE
from sanic.exceptions import InvalidUsage, ServerError
from sanic.headers import (
AcceptContainer,
Options,
parse_accept,
parse_content_header,
parse_credentials,
parse_forwarded,
parse_host,
parse_xforwarded,
)
from sanic.http import Http, Stage
from sanic.log import error_logger, logger
from sanic.models.protocol_types import TransportProtocol
from sanic.response import BaseHTTPResponse, HTTPResponse
try:
from ujson import loads as json_loads # type: ignore
except ImportError:
from json import loads as json_loads # type: ignore
class RequestParameters(dict):
    """
    A ``dict`` whose values are lists of parsed parameter values.

    ``get`` exposes only the first value for a key, while ``getlist``
    returns the complete list.
    """

    def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:
        """Return the first value for *name*, or *default* if missing."""
        try:
            return self[name][0]
        except KeyError:
            return default

    def getlist(
        self, name: str, default: Optional[Any] = None
    ) -> Optional[Any]:
        """Return the whole list of values for *name*, or *default*."""
        if name in self:
            return self[name]
        return default
class Request:
"""
Properties of an HTTP request such as URL, headers, etc.
"""
__slots__ = (
"__weakref__",
"_cookies",
"_id",
"_ip",
"_parsed_url",
"_port",
"_protocol",
"_remote_addr",
"_socket",
"_match_info",
"_name",
"app",
"body",
"conn_info",
"ctx",
"head",
"headers",
"method",
"parsed_accept",
"parsed_args",
"parsed_credentials",
"parsed_files",
"parsed_form",
"parsed_forwarded",
"parsed_json",
"parsed_not_grouped_args",
"parsed_token",
"raw_url",
"responded",
"request_middleware_started",
"route",
"stream",
"transport",
"version",
)
    def __init__(
        self,
        url_bytes: bytes,
        headers: Header,
        version: str,
        method: str,
        transport: TransportProtocol,
        app: Sanic,
        head: bytes = b"",
    ):
        """Initialize request state from the parsed request head.

        :param url_bytes: raw request target (path + query) as received
        :param headers: parsed request headers
        :param version: HTTP version string, e.g. ``"1.1"``
        :param method: HTTP method, e.g. ``"GET"``
        :param transport: transport/protocol object for this connection
        :param app: the owning Sanic application
        :param head: the complete raw request head (request line + headers)
        """
        self.raw_url = url_bytes
        # TODO: Content-Encoding detection
        self._parsed_url = parse_url(url_bytes)
        self._id: Optional[Union[uuid.UUID, str, int]] = None
        self._name: Optional[str] = None
        self.app = app

        self.headers = Header(headers)
        self.version = version
        self.method = method
        self.transport = transport
        self.head = head

        # Init but do not inhale: the body is streamed in later by
        # ``receive_body`` (or read incrementally by streaming handlers).
        self.body = b""
        self.conn_info: Optional[ConnInfo] = None
        self.ctx = SimpleNamespace()
        # Lazily-populated parse caches; filled on first property access.
        self.parsed_forwarded: Optional[Options] = None
        self.parsed_accept: Optional[AcceptContainer] = None
        self.parsed_credentials: Optional[Credentials] = None
        self.parsed_json = None
        self.parsed_form = None
        self.parsed_files = None
        self.parsed_token: Optional[str] = None
        # Keyed by the (keep_blank_values, strict_parsing, encoding,
        # errors) option tuples used when parsing query arguments.
        self.parsed_args: DefaultDict[
            Tuple[bool, bool, str, str], RequestParameters
        ] = defaultdict(RequestParameters)
        self.parsed_not_grouped_args: DefaultDict[
            Tuple[bool, bool, str, str], List[Tuple[str, str]]
        ] = defaultdict(list)
        self.request_middleware_started = False
        self._cookies: Optional[Dict[str, str]] = None
        self._match_info: Dict[str, Any] = {}
        self.stream: Optional[Http] = None
        self.route: Optional[Route] = None
        self._protocol = None
        self.responded: bool = False
def __repr__(self):
class_name = self.__class__.__name__
return f"<{class_name}: {self.method} {self.path}>"
    @classmethod
    def generate_id(*_):
        # Fallback request-ID factory used when the client sent no
        # REQUEST_ID_HEADER.  ``*_`` swallows both the implicit class
        # argument and the instance passed explicitly by ``Request.id``
        # (which calls ``self.__class__.generate_id(self)``), so
        # subclasses may override with a conventional signature.
        return uuid.uuid4()
    def reset_response(self):
        """Clear any prepared response so the request can respond again.

        Raises ``ServerError`` if the previous response already left the
        HANDLER stage (data may have gone out on the wire).  When no
        stream/response has been set up yet, the attribute chain below
        raises ``AttributeError``, which is deliberately swallowed —
        there is nothing to reset in that case.
        """
        try:
            if (
                self.stream is not None
                and self.stream.stage is not Stage.HANDLER
            ):
                raise ServerError(
                    "Cannot reset response because previous response was sent."
                )
            self.stream.response.stream = None
            self.stream.response = None
            self.responded = False
        except AttributeError:
            pass
    async def respond(
        self,
        response: Optional[BaseHTTPResponse] = None,
        *,
        status: int = 200,
        headers: Optional[Union[Header, Dict[str, str]]] = None,
        content_type: Optional[str] = None,
    ):
        """Respond to the request without returning from the handler.

        When *response* is not given, an ``HTTPResponse`` is built from
        ``status``/``headers``/``content_type``.  The response is then
        connected to the HTTP stream and run through the response
        middleware chain.  A second call on the same request raises
        ``ServerError``.

        :return: the (possibly middleware-modified) response object
        """
        try:
            if self.stream is not None and self.stream.response:
                raise ServerError("Second respond call is not allowed.")
        except AttributeError:
            # No stream attached yet — nothing to guard against.
            pass
        # This logic of determining which response to use is subject to change
        if response is None:
            response = HTTPResponse(
                status=status,
                headers=headers,
                content_type=content_type,
            )
        # Connect the response
        if isinstance(response, BaseHTTPResponse) and self.stream:
            response = self.stream.respond(response)
        # Run response middleware
        try:
            response = await self.app._run_response_middleware(
                self, response, request_name=self.name
            )
        except CancelledErrors:
            # Cancellation must propagate so the server can abort cleanly.
            raise
        except Exception:
            # Middleware failures are logged but do not replace the
            # already-connected response.
            error_logger.exception(
                "Exception occurred in one of response middleware handlers"
            )
        self.responded = True
        return response
async def receive_body(self):
    """Receive request.body, if not already received.

    Streaming handlers may call this to receive the full body. Sanic calls
    this function before running any handlers of non-streaming routes.
    Custom request classes can override this for custom handling of both
    streaming and non-streaming routes.
    """
    if self.body:
        return
    chunks = [data async for data in self.stream]
    self.body = b"".join(chunks)
@property
def name(self):
    """Explicit request name if set, else the matched route's name, else None."""
    if self._name:
        return self._name
    return self.route.name if self.route else None
@property
def endpoint(self):
    # Alias of :attr:`name`.
    return self.name
@property
def uri_template(self):
    # Path pattern of the resolved route, with a leading slash prepended.
    # NOTE(review): raises AttributeError when no route matched
    # (``self.route`` is None) -- confirm callers only use this after routing.
    return f"/{self.route.path}"
@property
def protocol(self):
    """Protocol object obtained from the transport, resolved once and cached."""
    cached = self._protocol
    if not cached:
        cached = self.transport.get_protocol()
        self._protocol = cached
    return cached
@property
def raw_headers(self):
    """Raw header block of ``head``: everything after the request line."""
    _request_line, header_block = self.head.split(b"\r\n", 1)
    return bytes(header_block)
@property
def request_line(self):
    """First line of ``head``: the raw HTTP request line."""
    first_line, _header_block = self.head.split(b"\r\n", 1)
    return bytes(first_line)
@property
def id(self) -> Optional[Union[uuid.UUID, str, int]]:
    """
    A request ID passed from the client, or generated from the backend.

    By default, this will look in a request header defined at:
    ``self.app.config.REQUEST_ID_HEADER``. It defaults to
    ``X-Request-ID``. Sanic will try to cast the ID into a ``UUID`` or an
    ``int``. If there is not a UUID from the client, then Sanic will try
    to generate an ID by calling ``Request.generate_id()``. The default
    behavior is to generate a ``UUID``. You can customize this behavior
    by subclassing ``Request``.

    .. code-block:: python

        from sanic import Request, Sanic
        from itertools import count

        class IntRequest(Request):
            counter = count()

            def generate_id(self):
                return next(self.counter)

        app = Sanic("MyApp", request_class=IntRequest)
    """
    if not self._id:
        # NOTE: the fallback ID is computed eagerly (it is an argument to
        # getone), even when the client header is present. The instance is
        # passed explicitly because generate_id has a bare *_ signature.
        self._id = self.headers.getone(
            self.app.config.REQUEST_ID_HEADER,
            self.__class__.generate_id(self),  # type: ignore
        )

        # Try casting to a UUID or an integer
        if isinstance(self._id, str):
            try:
                self._id = uuid.UUID(self._id)
            except ValueError:
                try:
                    self._id = int(self._id)  # type: ignore
                except ValueError:
                    # Keep the raw string when it is neither a UUID nor an int.
                    ...

    return self._id  # type: ignore
@property
def json(self):
    """Lazily-parsed JSON body (cached after the first access)."""
    if self.parsed_json is not None:
        return self.parsed_json
    return self.load_json()
def load_json(self, loads=json_loads):
    """Parse the raw body with *loads* and cache it into ``parsed_json``.

    An empty body yields None; a non-empty body that fails to parse
    raises InvalidUsage.
    """
    try:
        self.parsed_json = loads(self.body)
    except Exception:
        if self.body:
            raise InvalidUsage("Failed when parsing body as json")
        return None
    return self.parsed_json
@property
def accept(self) -> AcceptContainer:
    """Parsed Accept header, computed once and cached."""
    if self.parsed_accept is None:
        raw = self.headers.getone("accept", "")
        self.parsed_accept = parse_accept(raw)
    return self.parsed_accept
@property
def token(self) -> Optional[str]:
    """Attempt to return the auth header token.

    :return: token related to request
    """
    if self.parsed_token is None:
        auth_header = self.headers.getone("authorization", None)
        _, self.parsed_token = parse_credentials(
            auth_header, ("Bearer", "Token")
        )
    return self.parsed_token
@property
def credentials(self) -> Optional[Credentials]:
    """Attempt to return the auth header value.

    Covers NoAuth, Basic Auth, Bearer Token, Api Token authentication
    schemas.

    :return: A named tuple with token or username and password related
        to request
    """
    if self.parsed_credentials is None:
        try:
            prefix, credentials = parse_credentials(
                self.headers.getone("authorization", None)
            )
            if credentials:
                self.parsed_credentials = Credentials(
                    auth_type=prefix, token=credentials
                )
        except ValueError:
            # Malformed authorization header: leave credentials as None.
            pass
    return self.parsed_credentials
@property
def form(self):
    """Form data from the request body, parsed lazily and cached.

    Handles ``application/x-www-form-urlencoded`` and
    ``multipart/form-data``; any other content type leaves the result
    empty. Parsing errors are logged, not raised. Parsing also populates
    ``parsed_files`` (consumed by the ``files`` property).
    """
    if self.parsed_form is None:
        self.parsed_form = RequestParameters()
        self.parsed_files = RequestParameters()
        content_type = self.headers.getone(
            "content-type", DEFAULT_HTTP_CONTENT_TYPE
        )
        content_type, parameters = parse_content_header(content_type)
        try:
            if content_type == "application/x-www-form-urlencoded":
                self.parsed_form = RequestParameters(
                    parse_qs(self.body.decode("utf-8"))
                )
            elif content_type == "multipart/form-data":
                # TODO: Stream this instead of reading to/from memory
                boundary = parameters["boundary"].encode("utf-8")
                self.parsed_form, self.parsed_files = parse_multipart_form(
                    self.body, boundary
                )
        except Exception:
            error_logger.exception("Failed when parsing form")

    return self.parsed_form
@property
def files(self):
    """Uploaded files, populated lazily as a side effect of ``form``."""
    if self.parsed_files is None:
        _ = self.form  # parsing the form also fills parsed_files
    return self.parsed_files
def get_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> RequestParameters:
"""
Method to parse `query_string` using `urllib.parse.parse_qs`.
This methods is used by `args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values:
flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing:
flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: RequestParameters
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_args:
if self.query_string:
self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = RequestParameters(
parse_qs(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
)
return self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
args = property(get_args)
def get_query_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> list:
"""
Method to parse `query_string` using `urllib.parse.parse_qsl`.
This methods is used by `query_args` property.
Can be used directly if you need to change default parameters.
:param keep_blank_values:
flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
:type keep_blank_values: bool
:param strict_parsing:
flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored. If true,
errors raise a ValueError exception.
:type strict_parsing: bool
:param encoding:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type encoding: str
:param errors:
specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
:type errors: str
:return: list
"""
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_not_grouped_args:
if self.query_string:
self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = parse_qsl(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
return self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
query_args = property(get_query_args)
"""
Convenience property to access :meth:`Request.get_query_args` with
default values.
"""
@property
def cookies(self) -> Dict[str, str]:
    """
    :return: Incoming cookies on the request
    :rtype: Dict[str, str]
    """
    if self._cookies is not None:
        return self._cookies
    header = self.headers.getone("cookie", None)
    if header is None:
        self._cookies = {}
    else:
        jar: SimpleCookie = SimpleCookie()
        jar.load(header)
        self._cookies = {key: morsel.value for key, morsel in jar.items()}
    return self._cookies
@property
def content_type(self) -> str:
"""
:return: Content-Type header form the request
:rtype: str
"""
return self.headers.getone("content-type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self):
"""
:return: matched info after resolving route
"""
return self._match_info
@match_info.setter
def match_info(self, value):
self._match_info = value
# Transport properties (obtained from local interface only)
@property
def ip(self) -> str:
"""
:return: peer ip of the socket
:rtype: str
"""
return self.conn_info.client_ip if self.conn_info else ""
@property
def port(self) -> int:
"""
:return: peer port of the socket
:rtype: int
"""
return self.conn_info.client_port if self.conn_info else 0
@property
def socket(self):
return self.conn_info.peername if self.conn_info else (None, None)
@property
def path(self) -> str:
"""
:return: path of the local HTTP request
:rtype: str
"""
return self._parsed_url.path.decode("utf-8")
# Proxy properties (using SERVER_NAME/forwarded/request/transport info)
@property
def forwarded(self) -> Options:
"""
Active proxy information obtained from request headers, as specified in
Sanic configuration.
Field names by, for, proto, host, port and path are normalized.
- for and by IPv6 addresses are bracketed
- port (int) is only set by port headers, not from host.
- path is url-unencoded
Additional values may be available from new style Forwarded headers.
:return: forwarded address info
:rtype: Dict[str, str]
"""
if self.parsed_forwarded is None:
self.parsed_forwarded = (
parse_forwarded(self.headers, self.app.config)
or parse_xforwarded(self.headers, self.app.config)
or {}
)
return self.parsed_forwarded
@property
def remote_addr(self) -> str:
"""
Client IP address, if available.
1. proxied remote address `self.forwarded['for']`
2. local remote address `self.ip`
:return: IPv4, bracketed IPv6, UNIX socket name or arbitrary string
:rtype: str
"""
if not hasattr(self, "_remote_addr"):
self._remote_addr = str(
self.forwarded.get("for", "")
) # or self.ip
return self._remote_addr
@property
def scheme(self) -> str:
"""
Determine request scheme.
1. `config.SERVER_NAME` if in full URL format
2. proxied proto/scheme
3. local connection protocol
:return: http|https|ws|wss or arbitrary value given by the headers.
:rtype: str
"""
if "//" in self.app.config.get("SERVER_NAME", ""):
return self.app.config.SERVER_NAME.split("//")[0]
if "proto" in self.forwarded:
return str(self.forwarded["proto"])
if (
self.app.websocket_enabled
and self.headers.getone("upgrade", "").lower() == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self) -> str:
"""
The currently effective server 'host' (hostname or hostname:port).
1. `config.SERVER_NAME` overrides any client headers
2. proxied host of original request
3. request host header
hostname and port may be separated by
`sanic.headers.parse_host(request.host)`.
:return: the first matching host found, or empty string
:rtype: str
"""
server_name = self.app.config.get("SERVER_NAME")
if server_name:
return server_name.split("//", 1)[-1].split("/", 1)[0]
return str(
self.forwarded.get("host") or self.headers.getone("host", "")
)
@property
def server_name(self) -> str:
"""
:return: hostname the client connected to, by ``request.host``
:rtype: str
"""
return parse_host(self.host)[0] or ""
@property
def server_port(self) -> int:
"""
The port the client connected to, by forwarded ``port`` or
``request.host``.
Default port is returned as 80 and 443 based on ``request.scheme``.
:return: port number
:rtype: int
"""
port = self.forwarded.get("port") or parse_host(self.host)[1]
return int(port or (80 if self.scheme in ("http", "ws") else 443))
@property
def server_path(self) -> str:
"""
:return: full path of current URL; uses proxied or local path
:rtype: str
"""
return str(self.forwarded.get("path") or self.path)
@property
def query_string(self) -> str:
"""
:return: representation of the requested query
:rtype: str
"""
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self) -> str:
"""
:return: the URL
:rtype: str
"""
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
def url_for(self, view_name: str, **kwargs) -> str:
"""
Same as :func:`sanic.Sanic.url_for`, but automatically determine
`scheme` and `netloc` base on the request. Since this method is aiming
to generate correct schema & netloc, `_external` is implied.
:param kwargs: takes same parameters as in :func:`sanic.Sanic.url_for`
:return: an absolute url to the given view
:rtype: str
"""
# Full URL SERVER_NAME can only be handled in app.url_for
try:
if "//" in self.app.config.SERVER_NAME:
return self.app.url_for(view_name, _external=True, **kwargs)
except AttributeError:
pass
scheme = self.scheme
host = self.server_name
port = self.server_port
if (scheme.lower() in ("http", "ws") and port == 80) or (
scheme.lower() in ("https", "wss") and port == 443
):
netloc = host
else:
netloc = f"{host}:{port}"
return self.app.url_for(
view_name, _external=True, _scheme=scheme, _server=netloc, **kwargs
)
class File(NamedTuple):
    """
    Model for defining a file. It is a ``namedtuple``, therefore you can
    iterate over the object, or access the parameters by name.

    :param type: The mimetype, defaults to text/plain
    :param body: Bytes of the file
    :param name: The filename
    """

    type: str  # MIME type from the part's Content-Type header
    body: bytes  # raw file contents
    name: str  # filename from the Content-Disposition header
def parse_multipart_form(body, boundary):
    """
    Parse a request body and returns fields and files

    :param body: bytes request body
    :param boundary: bytes multipart boundary
    :return: fields (RequestParameters), files (RequestParameters)
    """
    files = RequestParameters()
    fields = RequestParameters()

    # Split on the bare boundary; the first and last fragments are the
    # preamble/epilogue, skipped by the [1:-1] slice below.
    form_parts = body.split(boundary)
    for form_part in form_parts[1:-1]:
        file_name = None
        content_type = "text/plain"  # default when no Content-Type header
        content_charset = "utf-8"
        field_name = None
        line_index = 2  # skip the leading "\r\n" after the boundary
        line_end_index = 0
        while not line_end_index == -1:
            # Read one header line of this part.
            line_end_index = form_part.find(b"\r\n", line_index)
            form_line = form_part[line_index:line_end_index].decode("utf-8")
            line_index = line_end_index + 2

            if not form_line:
                # Blank line: headers done, the payload follows.
                break

            colon_index = form_line.index(":")
            idx = colon_index + 2  # skip ": " after the header name

            form_header_field = form_line[0:colon_index].lower()
            form_header_value, form_parameters = parse_content_header(
                form_line[idx:]
            )

            if form_header_field == "content-disposition":
                field_name = form_parameters.get("name")
                file_name = form_parameters.get("filename")

                # non-ASCII filenames in RFC2231, "filename*" format
                if file_name is None and form_parameters.get("filename*"):
                    encoding, _, value = email.utils.decode_rfc2231(
                        form_parameters["filename*"]
                    )
                    file_name = unquote(value, encoding=encoding)
            elif form_header_field == "content-type":
                content_type = form_header_value
                content_charset = form_parameters.get("charset", "utf-8")

        if field_name:
            # Drop the trailing "\r\n--" that precedes the next boundary.
            post_data = form_part[line_index:-4]
            if file_name is None:
                # Plain field: decode text, group repeated names in a list.
                value = post_data.decode(content_charset)
                if field_name in fields:
                    fields[field_name].append(value)
                else:
                    fields[field_name] = [value]
            else:
                # File upload: keep raw bytes.
                form_file = File(
                    type=content_type, name=file_name, body=post_data
                )
                if field_name in files:
                    files[field_name].append(form_file)
                else:
                    files[field_name] = [form_file]
        else:
            logger.debug(
                "Form-data field does not have a 'name' parameter "
                "in the Content-Disposition header"
            )

    return fields, files
| 31.712456 | 79 | 0.574538 | from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
DefaultDict,
Dict,
List,
NamedTuple,
Optional,
Tuple,
Union,
)
from sanic_routing.route import Route
from sanic.models.http_types import Credentials
if TYPE_CHECKING:
from sanic.server import ConnInfo
from sanic.app import Sanic
import email.utils
import uuid
from collections import defaultdict
from http.cookies import SimpleCookie
from types import SimpleNamespace
from urllib.parse import parse_qs, parse_qsl, unquote, urlunparse
from httptools import parse_url
from sanic.compat import CancelledErrors, Header
from sanic.constants import DEFAULT_HTTP_CONTENT_TYPE
from sanic.exceptions import InvalidUsage, ServerError
from sanic.headers import (
AcceptContainer,
Options,
parse_accept,
parse_content_header,
parse_credentials,
parse_forwarded,
parse_host,
parse_xforwarded,
)
from sanic.http import Http, Stage
from sanic.log import error_logger, logger
from sanic.models.protocol_types import TransportProtocol
from sanic.response import BaseHTTPResponse, HTTPResponse
try:
from ujson import loads as json_loads
except ImportError:
from json import loads as json_loads
class RequestParameters(dict):
    """Dict for query/form data whose values are lists: ``get`` yields the
    first item for a key while ``getlist`` yields the whole list."""

    def get(self, name: str, default: Optional[Any] = None) -> Optional[Any]:
        """Return the first value stored under *name*, or *default*."""
        values = super().get(name, [default])
        return values[0]

    def getlist(
        self, name: str, default: Optional[Any] = None
    ) -> Optional[Any]:
        """Return the full list stored under *name*, or *default*."""
        return dict.get(self, name, default)
class Request:
__slots__ = (
"__weakref__",
"_cookies",
"_id",
"_ip",
"_parsed_url",
"_port",
"_protocol",
"_remote_addr",
"_socket",
"_match_info",
"_name",
"app",
"body",
"conn_info",
"ctx",
"head",
"headers",
"method",
"parsed_accept",
"parsed_args",
"parsed_credentials",
"parsed_files",
"parsed_form",
"parsed_forwarded",
"parsed_json",
"parsed_not_grouped_args",
"parsed_token",
"raw_url",
"responded",
"request_middleware_started",
"route",
"stream",
"transport",
"version",
)
def __init__(
self,
url_bytes: bytes,
headers: Header,
version: str,
method: str,
transport: TransportProtocol,
app: Sanic,
head: bytes = b"",
):
self.raw_url = url_bytes
self._parsed_url = parse_url(url_bytes)
self._id: Optional[Union[uuid.UUID, str, int]] = None
self._name: Optional[str] = None
self.app = app
self.headers = Header(headers)
self.version = version
self.method = method
self.transport = transport
self.head = head
self.body = b""
self.conn_info: Optional[ConnInfo] = None
self.ctx = SimpleNamespace()
self.parsed_forwarded: Optional[Options] = None
self.parsed_accept: Optional[AcceptContainer] = None
self.parsed_credentials: Optional[Credentials] = None
self.parsed_json = None
self.parsed_form = None
self.parsed_files = None
self.parsed_token: Optional[str] = None
self.parsed_args: DefaultDict[
Tuple[bool, bool, str, str], RequestParameters
] = defaultdict(RequestParameters)
self.parsed_not_grouped_args: DefaultDict[
Tuple[bool, bool, str, str], List[Tuple[str, str]]
] = defaultdict(list)
self.request_middleware_started = False
self._cookies: Optional[Dict[str, str]] = None
self._match_info: Dict[str, Any] = {}
self.stream: Optional[Http] = None
self.route: Optional[Route] = None
self._protocol = None
self.responded: bool = False
def __repr__(self):
class_name = self.__class__.__name__
return f"<{class_name}: {self.method} {self.path}>"
@classmethod
def generate_id(*_):
return uuid.uuid4()
def reset_response(self):
try:
if (
self.stream is not None
and self.stream.stage is not Stage.HANDLER
):
raise ServerError(
"Cannot reset response because previous response was sent."
)
self.stream.response.stream = None
self.stream.response = None
self.responded = False
except AttributeError:
pass
async def respond(
self,
response: Optional[BaseHTTPResponse] = None,
*,
status: int = 200,
headers: Optional[Union[Header, Dict[str, str]]] = None,
content_type: Optional[str] = None,
):
try:
if self.stream is not None and self.stream.response:
raise ServerError("Second respond call is not allowed.")
except AttributeError:
pass
if response is None:
response = HTTPResponse(
status=status,
headers=headers,
content_type=content_type,
)
if isinstance(response, BaseHTTPResponse) and self.stream:
response = self.stream.respond(response)
try:
response = await self.app._run_response_middleware(
self, response, request_name=self.name
)
except CancelledErrors:
raise
except Exception:
error_logger.exception(
"Exception occurred in one of response middleware handlers"
)
self.responded = True
return response
async def receive_body(self):
if not self.body:
self.body = b"".join([data async for data in self.stream])
@property
def name(self):
if self._name:
return self._name
elif self.route:
return self.route.name
return None
@property
def endpoint(self):
return self.name
@property
def uri_template(self):
return f"/{self.route.path}"
@property
def protocol(self):
if not self._protocol:
self._protocol = self.transport.get_protocol()
return self._protocol
@property
def raw_headers(self):
_, headers = self.head.split(b"\r\n", 1)
return bytes(headers)
@property
def request_line(self):
reqline, _ = self.head.split(b"\r\n", 1)
return bytes(reqline)
@property
def id(self) -> Optional[Union[uuid.UUID, str, int]]:
if not self._id:
self._id = self.headers.getone(
self.app.config.REQUEST_ID_HEADER,
self.__class__.generate_id(self),
)
if isinstance(self._id, str):
try:
self._id = uuid.UUID(self._id)
except ValueError:
try:
self._id = int(self._id)
except ValueError:
...
return self._id
@property
def json(self):
if self.parsed_json is None:
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
@property
def accept(self) -> AcceptContainer:
if self.parsed_accept is None:
accept_header = self.headers.getone("accept", "")
self.parsed_accept = parse_accept(accept_header)
return self.parsed_accept
@property
def token(self) -> Optional[str]:
if self.parsed_token is None:
prefixes = ("Bearer", "Token")
_, token = parse_credentials(
self.headers.getone("authorization", None), prefixes
)
self.parsed_token = token
return self.parsed_token
@property
def credentials(self) -> Optional[Credentials]:
if self.parsed_credentials is None:
try:
prefix, credentials = parse_credentials(
self.headers.getone("authorization", None)
)
if credentials:
self.parsed_credentials = Credentials(
auth_type=prefix, token=credentials
)
except ValueError:
pass
return self.parsed_credentials
@property
def form(self):
if self.parsed_form is None:
self.parsed_form = RequestParameters()
self.parsed_files = RequestParameters()
content_type = self.headers.getone(
"content-type", DEFAULT_HTTP_CONTENT_TYPE
)
content_type, parameters = parse_content_header(content_type)
try:
if content_type == "application/x-www-form-urlencoded":
self.parsed_form = RequestParameters(
parse_qs(self.body.decode("utf-8"))
)
elif content_type == "multipart/form-data":
boundary = parameters["boundary"].encode("utf-8")
self.parsed_form, self.parsed_files = parse_multipart_form(
self.body, boundary
)
except Exception:
error_logger.exception("Failed when parsing form")
return self.parsed_form
@property
def files(self):
if self.parsed_files is None:
self.form
return self.parsed_files
def get_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> RequestParameters:
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_args:
if self.query_string:
self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = RequestParameters(
parse_qs(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
)
return self.parsed_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
args = property(get_args)
def get_query_args(
self,
keep_blank_values: bool = False,
strict_parsing: bool = False,
encoding: str = "utf-8",
errors: str = "replace",
) -> list:
if (
keep_blank_values,
strict_parsing,
encoding,
errors,
) not in self.parsed_not_grouped_args:
if self.query_string:
self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
] = parse_qsl(
qs=self.query_string,
keep_blank_values=keep_blank_values,
strict_parsing=strict_parsing,
encoding=encoding,
errors=errors,
)
return self.parsed_not_grouped_args[
(keep_blank_values, strict_parsing, encoding, errors)
]
query_args = property(get_query_args)
@property
def cookies(self) -> Dict[str, str]:
if self._cookies is None:
cookie = self.headers.getone("cookie", None)
if cookie is not None:
cookies: SimpleCookie = SimpleCookie()
cookies.load(cookie)
self._cookies = {
name: cookie.value for name, cookie in cookies.items()
}
else:
self._cookies = {}
return self._cookies
@property
def content_type(self) -> str:
return self.headers.getone("content-type", DEFAULT_HTTP_CONTENT_TYPE)
@property
def match_info(self):
return self._match_info
@match_info.setter
def match_info(self, value):
self._match_info = value
@property
def ip(self) -> str:
return self.conn_info.client_ip if self.conn_info else ""
@property
def port(self) -> int:
return self.conn_info.client_port if self.conn_info else 0
@property
def socket(self):
return self.conn_info.peername if self.conn_info else (None, None)
@property
def path(self) -> str:
return self._parsed_url.path.decode("utf-8")
@property
def forwarded(self) -> Options:
if self.parsed_forwarded is None:
self.parsed_forwarded = (
parse_forwarded(self.headers, self.app.config)
or parse_xforwarded(self.headers, self.app.config)
or {}
)
return self.parsed_forwarded
@property
def remote_addr(self) -> str:
if not hasattr(self, "_remote_addr"):
self._remote_addr = str(
self.forwarded.get("for", "")
)
return self._remote_addr
@property
def scheme(self) -> str:
if "//" in self.app.config.get("SERVER_NAME", ""):
return self.app.config.SERVER_NAME.split("//")[0]
if "proto" in self.forwarded:
return str(self.forwarded["proto"])
if (
self.app.websocket_enabled
and self.headers.getone("upgrade", "").lower() == "websocket"
):
scheme = "ws"
else:
scheme = "http"
if self.transport.get_extra_info("sslcontext"):
scheme += "s"
return scheme
@property
def host(self) -> str:
server_name = self.app.config.get("SERVER_NAME")
if server_name:
return server_name.split("//", 1)[-1].split("/", 1)[0]
return str(
self.forwarded.get("host") or self.headers.getone("host", "")
)
@property
def server_name(self) -> str:
return parse_host(self.host)[0] or ""
@property
def server_port(self) -> int:
port = self.forwarded.get("port") or parse_host(self.host)[1]
return int(port or (80 if self.scheme in ("http", "ws") else 443))
@property
def server_path(self) -> str:
return str(self.forwarded.get("path") or self.path)
@property
def query_string(self) -> str:
if self._parsed_url.query:
return self._parsed_url.query.decode("utf-8")
else:
return ""
@property
def url(self) -> str:
return urlunparse(
(self.scheme, self.host, self.path, None, self.query_string, None)
)
def url_for(self, view_name: str, **kwargs) -> str:
try:
if "//" in self.app.config.SERVER_NAME:
return self.app.url_for(view_name, _external=True, **kwargs)
except AttributeError:
pass
scheme = self.scheme
host = self.server_name
port = self.server_port
if (scheme.lower() in ("http", "ws") and port == 80) or (
scheme.lower() in ("https", "wss") and port == 443
):
netloc = host
else:
netloc = f"{host}:{port}"
return self.app.url_for(
view_name, _external=True, _scheme=scheme, _server=netloc, **kwargs
)
class File(NamedTuple):
type: str
body: bytes
name: str
def parse_multipart_form(body, boundary):
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
content_type = "text/plain"
content_charset = "utf-8"
field_name = None
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b"\r\n", line_index)
form_line = form_part[line_index:line_end_index].decode("utf-8")
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(":")
idx = colon_index + 2
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_content_header(
form_line[idx:]
)
if form_header_field == "content-disposition":
field_name = form_parameters.get("name")
file_name = form_parameters.get("filename")
if file_name is None and form_parameters.get("filename*"):
encoding, _, value = email.utils.decode_rfc2231(
form_parameters["filename*"]
)
file_name = unquote(value, encoding=encoding)
elif form_header_field == "content-type":
content_type = form_header_value
content_charset = form_parameters.get("charset", "utf-8")
if field_name:
post_data = form_part[line_index:-4]
if file_name is None:
value = post_data.decode(content_charset)
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
else:
form_file = File(
type=content_type, name=file_name, body=post_data
)
if field_name in files:
files[field_name].append(form_file)
else:
files[field_name] = [form_file]
else:
logger.debug(
"Form-data field does not have a 'name' parameter "
"in the Content-Disposition header"
)
return fields, files
| true | true |
f733955e890eb485d0b5a2d7a9e0ecde1d990814 | 4,990 | py | Python | sg_sr/sr_data/sr_cplx/svd/cpxrbm.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | sg_sr/sr_data/sr_cplx/svd/cpxrbm.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | sg_sr/sr_data/sr_cplx/svd/cpxrbm.py | JunaidAkhter/vmc_jax | 4f0dcc9f32cb6885cad3c5d797d9f9e01247f737 | [
"MIT"
] | null | null | null | import sys
# Find jVMC package
#sys.path.append("/Users/akhter/githesis-/jvmc/vmc_jax")
sys.path.append("/Users/akhter/thesis/vmc_jax")
import jax
from jax.config import config
config.update("jax_enable_x64", True)
import jax.random as random
import jax.numpy as jnp
import numpy as np
from jax.tree_util import tree_flatten, tree_unflatten
import jVMC
import tensornetwork as tn
tn.set_default_backend("jax")
import functools
from typing import Any, Callable, Sequence, Optional
import flax
from flax import linen as nn
from flax import optim
from jax import lax
from functools import partial
import jVMC.nets.initializers as init
import jVMC.global_defs as global_defs
import time
# DMRG energies produced with the TeNPy library https://github.com/tenpy/tenpy
#DMRG_energies = {"10": -1.0545844370449059, "20": -1.0900383739, "100": -1.1194665474274852}
L = 16 # system size
g = -0.7 # strength of external field
# Set up hamiltonian for open boundary conditions
hamiltonian = jVMC.operator.BranchFreeOperator()
for l in range(L - 1):
hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))
hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))
hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))
def svd(dp, shape, rank=L):
    """Compress the complex parameter matrix via a truncated SVD.

    ``dp`` is the flat update vector: its first half holds the real parts
    and its second half the imaginary parts of the weight matrix. The
    matrix dimensions are derived from ``dp``/``shape`` instead of the
    module-level globals ``L`` and ``h`` (previous behavior relied on them).

    :param dp: concatenated [real, imag] parameter vector.
    :param shape: tensor shape used for the two-leg/two-leg split
        (e.g. a four-legged tensor).
    :param rank: number of singular values to keep. BUG FIX: this argument
        was previously ignored in favor of the global ``r``.
    :return: the rank-truncated complex matrix, in matrix shape.
    """
    size = dp.shape[0] // 2  # number of complex entries
    n_rows = shape[0] * shape[1]
    n_cols = size // n_rows
    real_matrix = jnp.reshape(dp[:size], (n_rows, n_cols))
    complex_matrix = jnp.reshape(dp[size:], (n_rows, n_cols))
    # Assemble the complex weight matrix W = Re + i * Im.
    matrix = jax.lax.complex(real_matrix, complex_matrix)
    # View W as a tensor and split it with a truncated SVD, grouping the
    # two left legs and the two right legs.
    tensor1 = jnp.reshape(matrix, shape)
    node = tn.Node(tensor1)
    u, vh, _ = tn.split_node(
        node,
        left_edges=[node[0], node[1]],
        right_edges=[node[2], node[3]],
        max_singular_values=rank,  # honor the parameter, not the global r
    )
    # Contract U @ Vh back into a single tensor and restore matrix shape.
    node_contracted = (u @ vh).tensor
    return jnp.reshape(node_contracted, matrix.shape)
def simulate(rng, iterations, rank, t_step):
    """Run ``iterations`` TDVP ground-state-search steps, compressing the
    parameter update with a rank-truncated SVD each step.

    :param rng: seed for the variational wave function's PRNG.
    :param iterations: number of Euler/TDVP steps.
    :param rank: number of singular values kept by the compression.
        BUG FIX: previously the global ``r`` was passed to ``svd``,
        silently ignoring this argument.
    :param t_step: Euler integrator time step.
    :return: (iterations, 1) array of per-step energy densities.
    """
    net = net_init
    psi = jVMC.vqs.NQS(net, seed=rng)  # Variational wave function

    # Set up Markov-chain sampler.
    sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,
                                     numChains=100, sweepSteps=L,
                                     numSamples=30000, thermalizationSweeps=25)

    # Set up TDVP equation with diagonal regularization.
    tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,
                                       svdTol=1e-8, diagonalShift=10, makeReal='real')

    stepper = jVMC.util.stepper.Euler(timeStep=t_step)  # ODE integrator

    res = []
    for n in range(iterations):
        dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)
        # NOTE(review): (4, 4, 2, 2) has 64 entries but an L x h = 16 x 16
        # weight matrix has 256 -- confirm the intended tensor shape.
        dp = svd(dp, (4, 4, 2, 2), rank=rank)
        # Flatten back to the [real, imag] layout expected by the state.
        dp = jnp.concatenate([p.ravel() for p in tree_flatten(dp)[0]])
        dp = jnp.concatenate([dp.real, dp.imag])
        psi.set_parameters(dp)
        print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)
        res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])

    np.savetxt('dp', dp)
    return np.array(res)
# Reduced test settings (full run: iterations = 2500, rng_list = range(11)).
iterations = 2
rng_list = [0, 1]
time_step = 12e-2
h = L  # number of hidden units equals the system size
net_init = jVMC.nets.CpxRBM(numHidden=h, bias=False)
# Truncation ranks to scan (full scan: jnp.arange(L/2, L+1)).
rank_list = [8, 9]
results = []
for j, rng in enumerate(rng_list):
    # One column per RNG seed; filled below for every tested rank.
    E_0_aarray = np.zeros((iterations, len(rng_list)))
    for r in rank_list:
        res = simulate(rng, iterations, rank=r, t_step=time_step)
        # Shift by the reference ground-state energy for 16 spins.
        E_0 = res + 1.0660513358196495
        E_0_aarray[:, j] = E_0[:, 0]
        # Bug fix: was `results.apend(...)`, which raised AttributeError.
        results.append(E_0_aarray)
# Bug fix: np.savetxt only accepts 1-D/2-D data, but np.array(results) is 3-D
# (runs x iterations x seeds); flatten each run into one row before saving.
np.savetxt('cpxrbm_16_h16_sr_12t',
           np.asarray(results).reshape(len(results), -1),
           header='Data for CpxRBM with h = 16 for 1 initializations')
| 34.178082 | 119 | 0.672545 | import sys
sys.path.append("/Users/akhter/thesis/vmc_jax")
import jax
from jax.config import config
config.update("jax_enable_x64", True)
import jax.random as random
import jax.numpy as jnp
import numpy as np
from jax.tree_util import tree_flatten, tree_unflatten
import jVMC
import tensornetwork as tn
tn.set_default_backend("jax")
import functools
from typing import Any, Callable, Sequence, Optional
import flax
from flax import linen as nn
from flax import optim
from jax import lax
from functools import partial
import jVMC.nets.initializers as init
import jVMC.global_defs as global_defs
import time
L = 16  # system size
g = -0.7  # transverse-field strength
# Build the open-boundary Hamiltonian (Sz-Sz coupling plus Sx field terms).
hamiltonian = jVMC.operator.BranchFreeOperator()
for l in range(L - 1):
    hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))
    hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))
# Field term on the last site (not covered by the loop above).
hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))
def svd(dp,shape, rank=L):
    """Rebuild the complex (L, h) weight matrix from ``dp`` and compress it
    with a truncated SVD.

    NOTE(review): the ``rank`` argument is never used -- the truncation
    below reads the *global* ``r`` instead; confirm and fix upstream.
    """
    # Real parts occupy the first L*h entries, imaginary parts the rest.
    real_matrix = jnp.reshape(dp[:L*h], (L,h))
    complex_matrix = jnp.reshape(dp[L*h:], (L,h))
    print("real_matrix", real_matrix, "complex_matrix:", complex_matrix)
    matrix = jax.lax.complex(real_matrix, complex_matrix)
    print("matrix:", matrix)
    # Reshape to the requested tensor (e.g. four legs) for the bipartite SVD.
    tensor1 = jnp.reshape(matrix, shape)
    print("tensor1_shape and atype:", tensor1.shape, type(tensor1))
    node = tn.Node(tensor1)
    # Split keeping two left / two right legs; truncation uses global `r`.
    u, vh, _ = tn.split_node(node, left_edges=[node[0], node[1]], right_edges=[node[2],node[3]], max_singular_values=r)
    print("shape of u:", u.shape, "shape of vh:", vh.shape)
    node_contracted = (u @ vh).tensor
    matrix_returned = jnp.reshape(node_contracted, (matrix.shape))
    print("shape of matrix_returned:", matrix_returned.shape)
    return matrix_returned
def simulate(rng, iterations, rank, t_step):
    """Run `iterations` TDVP Euler steps; return per-site energies per step."""
    net = net_init
    psi = jVMC.vqs.NQS(net, seed=rng)  # variational wave function
    # Monte-Carlo sampler over spin configurations.
    sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,
                                     numChains=100, sweepSteps=L,
                                     numSamples=30000, thermalizationSweeps=25)
    tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,
                                       svdTol=1e-8, diagonalShift=10, makeReal='real')
    stepper = jVMC.util.stepper.Euler(timeStep=t_step)  # ODE integrator
    res = []
    for n in range(iterations):
        dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)
        print("dp_inserted", dp)
        # NOTE(review): passes the global loop variable `r`, ignoring this
        # function's own `rank` parameter -- likely a bug.
        dp = svd(dp, (4,4,2,2), rank = r)
        # Flatten back into the real-concatenated parameter layout.
        dp = jnp.concatenate([p.ravel() for p in tree_flatten(dp)[0]])
        dp = jnp.concatenate([dp.real, dp.imag])
        print("dp_returned", dp)
        psi.set_parameters(dp)
        print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)
        res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])
        np.savetxt('dp', dp)  # checkpoint of the latest parameters
    return np.array(res)
iterations = 2
rng_list = [0, 1]
time_step = 12e-2
h = L  # hidden units = system size
net_init = jVMC.nets.CpxRBM(numHidden = h, bias = False)
rank_list = [8,9]
results = []
for j,rng in enumerate(rng_list):
    # One column per RNG seed, filled for every tested rank.
    E_0_aarray = np.zeros((iterations, len(rng_list)))
    for r in rank_list:
        res = simulate(rng, iterations, rank=r, t_step = time_step)
        # Shift by the reference ground-state energy for 16 spins.
        E_0 = res + 1.0660513358196495
        E_0_aarray[:, j] = E_0[:, 0]
        # BUG(review): `apend` is a typo -- this raises AttributeError at
        # runtime; should be `results.append(...)`.
        results.apend(E_0_aarray)
# NOTE(review): np.array(results) would be 3-D here, which np.savetxt rejects.
np.savetxt('cpxrbm_16_h16_sr_12t', np.array(results), header='Data for CpxRBM with h = 16 for 1 initializations')
| true | true |
f733965676db0cd299d017c3fa2104464e3702c7 | 65 | py | Python | src/DREAMPlace/dreamplace/ops/lp_dp/lpdp_flow/__init__.py | lbz007/rectanglequery | 59d6eb007bf65480fa3e9245542d0b6071f81831 | [
"BSD-3-Clause"
] | 1 | 2021-01-01T23:39:02.000Z | 2021-01-01T23:39:02.000Z | src/DREAMPlace/dreamplace/ops/lp_dp/lpdp_flow/__init__.py | zoumingzhe/OpenEDA | e87867044b495e40d4276756a6cb13bb38fe49a9 | [
"BSD-3-Clause"
] | null | null | null | src/DREAMPlace/dreamplace/ops/lp_dp/lpdp_flow/__init__.py | zoumingzhe/OpenEDA | e87867044b495e40d4276756a6cb13bb38fe49a9 | [
"BSD-3-Clause"
] | null | null | null | ##
# @file __init__.py
# @author Zhou Fei
# @date Oct 2020
#
| 10.833333 | 21 | 0.584615 | true | true | |
f7339702cad3ed2804fe276b9d1fc6857c368206 | 2,473 | py | Python | PythonAndroid/youtube-dl/lib/python3.5/youtube_dl/extractor/einthusan.py | jianglei12138/python-3.5.1 | 2d248ceba8aa4c14ee43e57ece99cc1a43fd22b7 | [
"PSF-2.0"
] | 10 | 2020-05-29T03:20:03.000Z | 2022-03-29T01:05:20.000Z | youtube_dl/extractor/einthusan.py | huyangfeng/youtobedl | 7b0d1c28597bd38567e5b4e853f669a5a601c6e8 | [
"Unlicense"
] | 5 | 2016-04-22T01:33:31.000Z | 2016-08-04T15:33:19.000Z | PythonSamples/library/files/lib/python2.7/site-packages/youtube_dl/extractor/einthusan.py | jianglei12138/python2.7 | 280aa96d8cac98c03ca8c8ed71541f7ff7817055 | [
"PSF-2.0"
] | 9 | 2020-05-29T03:21:02.000Z | 2021-04-14T03:26:05.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
remove_start,
sanitized_Request,
)
class EinthusanIE(InfoExtractor):
    """Extractor for movies hosted on einthusan.com."""

    _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
    _TESTS = [
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
            'md5': 'af244f4458cd667205e513d75da5b8b1',
            'info_dict': {
                'id': '2447',
                'ext': 'mp4',
                'title': 'Ek Villain',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
            }
        },
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
            'md5': 'ef63c7a803e22315880ed182c10d1c5c',
            'info_dict': {
                'id': '1671',
                'ext': 'mp4',
                'title': 'Soodhu Kavvuum',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
            }
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The site rejects the default client, so pretend to be desktop Firefox.
        request = sanitized_Request(url)
        request.add_header(
            'User-Agent',
            'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')
        webpage = self._download_webpage(request, video_id)

        title = self._html_search_regex(
            r'<h1><a[^>]+class=["\']movie-title["\'][^>]*>(.+?)</a></h1>',
            webpage, 'title')

        # Prefer the id embedded in the page; fall back to the one from the URL.
        video_id = self._search_regex(
            r'data-movieid=["\'](\d+)', webpage, 'video id', default=video_id)

        # The CDN endpoint answers with the bare media URL as the page body.
        video_url = self._download_webpage(
            'http://cdn.einthusan.com/geturl/%s/hd/London,Washington,Toronto,Dallas,San,Sydney/'
            % video_id, video_id)

        description = self._html_search_meta('description', webpage)

        thumbnail = self._html_search_regex(
            r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
            webpage, "thumbnail url", fatal=False)
        if thumbnail is not None:
            # Thumbnail paths are page-relative ("../..."): resolve them.
            thumbnail = compat_urlparse.urljoin(url, remove_start(thumbnail, '..'))

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'description': description,
        }
| 34.830986 | 116 | 0.535786 |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
remove_start,
sanitized_Request,
)
class EinthusanIE(InfoExtractor):
    """Extractor for movies hosted on einthusan.com."""
    _VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
    _TESTS = [
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
            'md5': 'af244f4458cd667205e513d75da5b8b1',
            'info_dict': {
                'id': '2447',
                'ext': 'mp4',
                'title': 'Ek Villain',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
            }
        },
        {
            'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
            'md5': 'ef63c7a803e22315880ed182c10d1c5c',
            'info_dict': {
                'id': '1671',
                'ext': 'mp4',
                'title': 'Soodhu Kavvuum',
                'thumbnail': 're:^https?://.*\.jpg$',
                'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
            }
        },
    ]
    def _real_extract(self, url):
        """Download the watch page and resolve title, media URL and metadata."""
        video_id = self._match_id(url)
        # Spoof a desktop browser user agent for the page request.
        request = sanitized_Request(url)
        request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0')
        webpage = self._download_webpage(request, video_id)
        title = self._html_search_regex(
            r'<h1><a[^>]+class=["\']movie-title["\'][^>]*>(.+?)</a></h1>',
            webpage, 'title')
        # Prefer the id embedded in the page; fall back to the URL id.
        video_id = self._search_regex(
            r'data-movieid=["\'](\d+)', webpage, 'video id', default=video_id)
        # The CDN endpoint's response body is the media URL itself.
        video_url = self._download_webpage(
            'http://cdn.einthusan.com/geturl/%s/hd/London,Washington,Toronto,Dallas,San,Sydney/'
            % video_id, video_id)
        description = self._html_search_meta('description', webpage)
        thumbnail = self._html_search_regex(
            r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
            webpage, "thumbnail url", fatal=False)
        if thumbnail is not None:
            # Resolve the page-relative ("../...") thumbnail path.
            thumbnail = compat_urlparse.urljoin(url, remove_start(thumbnail, '..'))
        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'description': description,
        }
| true | true |
f733979048193e4066264f6686623c3d00567158 | 8,434 | py | Python | sknano/structures/_nanotube_bundle.py | haidi-ustc/scikit-nano | ef9b24165ba37918b3f520657f7311ba139b3e7d | [
"BSD-2-Clause"
] | 21 | 2016-06-08T18:27:20.000Z | 2022-03-22T08:27:46.000Z | sknano/structures/_nanotube_bundle.py | haidi-ustc/scikit-nano | ef9b24165ba37918b3f520657f7311ba139b3e7d | [
"BSD-2-Clause"
] | 8 | 2016-06-24T19:45:58.000Z | 2021-03-25T21:42:29.000Z | sknano/structures/_nanotube_bundle.py | scikit-nano/scikit-nano | ef9b24165ba37918b3f520657f7311ba139b3e7d | [
"BSD-2-Clause"
] | 9 | 2016-12-08T16:35:52.000Z | 2021-06-23T17:13:44.000Z | # -*- coding: utf-8 -*-
"""
==============================================================================
Nanotube bundle base class (:mod:`sknano.structures._nanotube_bundle`)
==============================================================================
.. currentmodule:: sknano.structures._nanotube_bundle
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
import numbers
import numpy as np
from sknano.core.atoms import Atom, vdw_radius_from_basis
from sknano.core.refdata import aCC, grams_per_Da
from sknano.core.math import Vector
from ._extras import get_chiral_indices
__all__ = ['compute_bundle_density', 'NanotubeBundleMixin',
'NanotubeBundleBase']
def compute_bundle_density(*Ch, r_vdw=None, bond=None,
                           element1=None, element2=None):
    """Compute nanotube bundle mass density \\
    :math:`\\rho_{\\mathrm{bundle}}(n, m)` in :math:`\\mathrm{g/cm^3}`.

    .. math::

       \\rho_{\\mathrm{bundle}}(n, m) = \\frac{8\\pi^2 m_{\\mathrm{C}}
       \\sqrt{n^2 + m^2 + nm}}{9\\sqrt{3}a_{\\mathrm{CC}}^3 \\times
       \\left(\\sqrt{n^2 + m^2 + nm} +
       \\frac{\\pi d_{\\mathrm{vdW}}}{\\sqrt{3}a_{\\mathrm{CC}}}\\right)^2}

    Parameters
    ----------
    *Ch : {:class:`python:tuple` or :class:`python:int`\\ s}
        Chiral indices :math:`(n, m)` of the nanotube, as a 2-tuple or
        as two separate integers.
    r_vdw : int
        van der Waals radius of nanotube atoms.  Defaults to the radius
        for the ``(element1, element2)`` basis.
    bond : float, optional
        Bond length; defaults to :math:`a_{\\mathrm{CC}}`.

    Returns
    -------
    float
        :math:`\\rho_{\\mathrm{bundle}}` in :math:`\\mathrm{g/cm^3}`.
    """
    n, m, _ = get_chiral_indices(*Ch)

    # Default the basis to carbon and the geometry to graphene values.
    bond = aCC if bond is None else bond
    element1 = 'C' if element1 is None else element1
    element2 = 'C' if element2 is None else element2
    if r_vdw is None:
        r_vdw = vdw_radius_from_basis(element1, element2)

    if element1 == element2:
        chiral_norm = np.sqrt(n ** 2 + m ** 2 + n * m)
        numerator = 8 * np.pi ** 2 * Atom(element1).mass * chiral_norm
        denominator = (9 * np.sqrt(3) * bond ** 3 *
                       (chiral_norm +
                        2 * np.pi * r_vdw / (np.sqrt(3) * bond)) ** 2)
        bundle_density = numerator / denominator
    else:
        # Mixed-basis bundles are not handled; density defaults to zero.
        bundle_density = 0

    # Convert Da/angstrom^3 -> g/cm^3 (1.6605e-24 g/Da, 1e-8 cm/angstrom).
    return bundle_density * (grams_per_Da / (1e-8) ** 3)
class NanotubeBundleMixin:
    """Mixin class for nanotube bundles.

    Host classes are expected to provide the tube diameter `dt` and the
    van der Waals radius `vdw_radius`, which together set the inter-tube
    spacing, plus `tube_mass` and `_bundle_geometries`.
    """

    @property
    def nx(self):
        """Number of nanotubes along the :math:`x`-axis."""
        return self._nx

    @nx.setter
    def nx(self, value):
        """Set :math:`n_x` (positive number; stored as `int`)."""
        # Bug fix: the original test used `or`, which accepted any number
        # (including non-positive ones) because `isinstance` short-circuited.
        if not (isinstance(value, numbers.Number) and value > 0):
            raise TypeError('Expected a positive integer.')
        self._nx = int(value)

    @nx.deleter
    def nx(self):
        del self._nx

    @property
    def ny(self):
        """Number of nanotubes along the :math:`y`-axis."""
        return self._ny

    @ny.setter
    def ny(self, value):
        """Set :math:`n_y` (positive number; stored as `int`)."""
        # Bug fix: `or` -> `and`, as in the `nx` setter above.
        if not (isinstance(value, numbers.Number) and value > 0):
            raise TypeError('Expected a positive integer.')
        self._ny = int(value)

    @ny.deleter
    def ny(self):
        del self._ny

    @property
    def Lx(self):
        """Bundle extent along :math:`x`: nx tube-plus-gap spacings / 10."""
        return self.nx * (self.dt + 2 * self.vdw_radius) / 10

    @property
    def Ly(self):
        """Bundle extent along :math:`y`: ny tube-plus-gap spacings / 10."""
        return self.ny * (self.dt + 2 * self.vdw_radius) / 10

    @property
    def bundle_geometry(self):
        """Bundle geometry name (one of `_bundle_geometries`) or `None`."""
        return self._bundle_geometry

    @bundle_geometry.setter
    def bundle_geometry(self, value):
        # Unknown geometries are warned about and treated as `None`.
        if value is not None and value not in self._bundle_geometries:
            print('Unrecognized `bundle_geometry`: {!r}'.format(value))
            value = None
        self._bundle_geometry = value

    @property
    def bundle_packing(self):
        """Bundle packing type: ``'ccp'``, ``'hcp'``, or `None`."""
        return self._bundle_packing

    @bundle_packing.setter
    def bundle_packing(self, value):
        # Derive a sensible default packing from the geometry.
        if value is None and \
                self.bundle_geometry in ('square', 'rectangle'):
            value = 'ccp'
        elif value is None and \
                self.bundle_geometry in ('triangle', 'hexagon'):
            value = 'hcp'

        if value is not None and value not in ('ccp', 'hcp'):
            raise ValueError('Expected value to be `hcp` or `ccp`')

        self._bundle_packing = value
        # self.generate_bundle_coords()

    @bundle_packing.deleter
    def bundle_packing(self):
        del self._bundle_packing

    @property
    def bundle_mass(self):
        """Total bundle mass: number of tubes times the per-tube mass."""
        return self.Ntubes * self.tube_mass

    @property
    def Natoms(self):
        """Number of atoms in nanotube bundle.

        **Returns total number of atoms in nanotube bundle.**
        Use :attr:`~NanotubeBundleMixin.Natoms_per_tube` to
        get a list of the number of atoms in each nanotube in
        the bundle.

        """
        return np.asarray(self.Natoms_list).sum()

    @property
    def Natoms_per_bundle(self):
        """Alias for :attr:`~NanotubeBundleMixin.Natoms`."""
        return self.Natoms

    @property
    def Natoms_list(self):
        """Atom counts, one entry per nanotube in `bundle_list`."""
        return [nanotube.Natoms for nanotube in self.bundle_list]

    @property
    def Ntubes(self):
        """Number of tube positions placed by `generate_bundle_coords`."""
        return len(self.bundle_coords)

    @property
    def Natoms_per_tube(self):
        """Alias for :attr:`~NanotubeBundleMixin.Natoms_list`."""
        return self.Natoms_list

    def generate_bundle_coords(self):
        """Generate coordinates of bundle tubes."""
        # Lattice vectors: tubes are spaced by one diameter plus twice the
        # van der Waals radius.
        self.r1 = Vector()
        self.r2 = Vector()
        self.bundle_coords = []

        self.r1.x = self.dt + 2 * self.vdw_radius
        if self.bundle_packing in ('cubic', 'ccp'):
            # Square lattice: second lattice vector is orthogonal.
            self.r2.y = self.r1.x
        else:
            # Hexagonal lattice: second lattice vector at 120 degrees.
            self.r2.x = self.r1.x * np.cos(2 * np.pi / 3)
            self.r2.y = self.r1.x * np.sin(2 * np.pi / 3)
            if self.bundle_packing is None:
                self._bundle_packing = 'hcp'

        if self.bundle_geometry == 'hexagon':
            # Build a hexagon row by row, starting from the widest middle
            # row (an odd number of tubes) and shrinking outward.
            nrows = max(self.nx, self.ny, 3)
            if nrows % 2 != 1:
                nrows += 1

            ntubes_per_end_rows = int((nrows + 1) / 2)

            row = 0
            ntubes_per_row = nrows
            while ntubes_per_row >= ntubes_per_end_rows:
                if row == 0:
                    for n in range(ntubes_per_row):
                        dr = n * self.r1
                        self.bundle_coords.append(dr)
                else:
                    # Mirror each shorter row above and below the middle row.
                    for nx in range(ntubes_per_row):
                        for ny in (-row, row):
                            dr = Vector()
                            dr.x = abs(ny * self.r2.x)
                            dr.y = ny * self.r2.y
                            dr = nx * self.r1 + dr
                            self.bundle_coords.append(dr)
                row += 1
                ntubes_per_row = nrows - row
        elif self.bundle_geometry == 'rectangle':
            Lx = 10 * self.Lx
            for nx in range(self.nx):
                for ny in range(self.ny):
                    dr = nx * self.r1 + ny * self.r2
                    # Wrap negative x (from the oblique r2) back into the cell.
                    while dr.x < 0:
                        dr.x += Lx
                    self.bundle_coords.append(dr)
        elif self.bundle_geometry == 'square':
            # TODO: square geometry is not implemented; no tubes are placed.
            pass
        elif self.bundle_geometry == 'triangle':
            # TODO: triangle geometry is not implemented; no tubes are placed.
            pass
        else:
            # Default: a full nx-by-ny parallelogram of tubes.
            for nx in range(self.nx):
                for ny in range(self.ny):
                    dr = nx * self.r1 + ny * self.r2
                    self.bundle_coords.append(dr)
class NanotubeBundleBase(NanotubeBundleMixin):
    """Nanotube bundle structure base class."""

    _bundle_geometries = ['square', 'rectangle', 'hexagon']

    def __init__(self, *args, nx=1, ny=1, bundle_packing=None,
                 bundle_geometry=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.nx = nx
        self.ny = ny
        # Geometry must be assigned before packing: the packing setter
        # consults `bundle_geometry` to pick a default.
        self.bundle_geometry = bundle_geometry
        self.bundle_packing = bundle_packing
        self.bundle_list = []
        self.generate_bundle_coords()

    def todict(self):
        """Return the attribute dict extended with the bundle parameters."""
        attrdict = super().todict()
        attrdict["nx"] = self.nx
        attrdict["ny"] = self.ny
        attrdict["bundle_packing"] = self.bundle_packing
        attrdict["bundle_geometry"] = self.bundle_geometry
        return attrdict
| 30.447653 | 78 | 0.541499 |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
import numbers
import numpy as np
from sknano.core.atoms import Atom, vdw_radius_from_basis
from sknano.core.refdata import aCC, grams_per_Da
from sknano.core.math import Vector
from ._extras import get_chiral_indices
__all__ = ['compute_bundle_density', 'NanotubeBundleMixin',
'NanotubeBundleBase']
def compute_bundle_density(*Ch, r_vdw=None, bond=None,
                           element1=None, element2=None):
    """Compute nanotube bundle mass density rho_bundle(n, m) in g/cm^3.

    ``Ch`` gives the chiral indices (n, m).  ``r_vdw`` defaults to the van
    der Waals radius for the (element1, element2) basis, ``bond`` to aCC,
    and both elements to carbon.  Mixed-element bundles are not handled
    and yield 0.
    """
    n, m, _ = get_chiral_indices(*Ch)
    if bond is None:
        bond = aCC
    if element1 is None:
        element1 = 'C'
    if element2 is None:
        element2 = 'C'
    if r_vdw is None:
        r_vdw = vdw_radius_from_basis(element1, element2)
    if element1 == element2:
        bundle_density = 8 * np.pi ** 2 * Atom(element1).mass * \
            np.sqrt(n ** 2 + m ** 2 + n * m) / \
            (9 * np.sqrt(3) * bond ** 3 *
             (np.sqrt(n ** 2 + m ** 2 + n * m) +
              2 * np.pi * r_vdw / (np.sqrt(3) * bond)) ** 2)
    else:
        bundle_density = 0
    # Convert Da/angstrom^3 to g/cm^3 (1.6605e-24 g/Da, 1e-8 cm/angstrom).
    bundle_density *= grams_per_Da / (1e-8) ** 3
    return bundle_density
class NanotubeBundleMixin:
    """Mixin providing (nx x ny) bundle-arrangement properties for nanotubes.

    Relies on the host class for `dt` (tube diameter), `vdw_radius`,
    `tube_mass` and `_bundle_geometries`.
    """
    @property
    def nx(self):
        # Number of tubes along the x-axis.
        return self._nx
    @nx.setter
    def nx(self, value):
        # NOTE(review): `or` makes this check ineffective -- any number
        # (including non-positive ones) passes; should almost certainly
        # be `and`.
        if not (isinstance(value, numbers.Number) or value > 0):
            raise TypeError('Expected a positive integer.')
        self._nx = int(value)
    @nx.deleter
    def nx(self):
        del self._nx
    @property
    def ny(self):
        # Number of tubes along the y-axis.
        return self._ny
    @ny.setter
    def ny(self, value):
        # NOTE(review): same `or`-vs-`and` issue as the `nx` setter above.
        if not (isinstance(value, numbers.Number) or value > 0):
            raise TypeError('Expected a positive integer.')
        self._ny = int(value)
    @ny.deleter
    def ny(self):
        del self._ny
    @property
    def Lx(self):
        # Bundle extent along x: nx tube-plus-vdW-gap spacings, divided by 10
        # (presumably an angstrom -> nm conversion; confirm units upstream).
        return self.nx * (self.dt + 2 * self.vdw_radius) / 10
    @property
    def Ly(self):
        # Bundle extent along y (same convention as Lx).
        return self.ny * (self.dt + 2 * self.vdw_radius) / 10
    @property
    def bundle_geometry(self):
        # Geometry name (one of `_bundle_geometries`) or None.
        return self._bundle_geometry
    @bundle_geometry.setter
    def bundle_geometry(self, value):
        # Unrecognized geometries are warned about and coerced to None.
        if value is not None and value not in self._bundle_geometries:
            print('Unrecognized `bundle_geometry`: {!r}'.format(value))
            value = None
        self._bundle_geometry = value
    @property
    def bundle_packing(self):
        # Packing type: 'ccp', 'hcp', or None.
        return self._bundle_packing
    @bundle_packing.setter
    def bundle_packing(self, value):
        # Derive a default packing from the geometry when none is given.
        if value is None and \
                self.bundle_geometry in ('square', 'rectangle'):
            value = 'ccp'
        elif value is None and \
                self.bundle_geometry in ('triangle', 'hexagon'):
            value = 'hcp'
        if value is not None and value not in ('ccp', 'hcp'):
            raise ValueError('Expected value to be `hcp` or `ccp`')
        self._bundle_packing = value
    @bundle_packing.deleter
    def bundle_packing(self):
        del self._bundle_packing
    @property
    def bundle_mass(self):
        # Total mass: tube count times per-tube mass.
        return self.Ntubes * self.tube_mass
    @property
    def Natoms(self):
        """Total number of atoms in the bundle (sum over `Natoms_list`)."""
        return np.asarray(self.Natoms_list).sum()
    @property
    def Natoms_per_bundle(self):
        # Alias for Natoms.
        return self.Natoms
    @property
    def Natoms_list(self):
        # Per-tube atom counts, one entry per tube in `bundle_list`.
        return [nanotube.Natoms for nanotube in self.bundle_list]
    @property
    def Ntubes(self):
        # Number of tube positions produced by generate_bundle_coords().
        return len(self.bundle_coords)
    @property
    def Natoms_per_tube(self):
        # Alias for Natoms_list.
        return self.Natoms_list
    def generate_bundle_coords(self):
        """Populate `bundle_coords` with lattice positions for all tubes."""
        # Lattice vectors: tubes spaced by one diameter plus twice r_vdw.
        self.r1 = Vector()
        self.r2 = Vector()
        self.bundle_coords = []
        self.r1.x = self.dt + 2 * self.vdw_radius
        if self.bundle_packing in ('cubic', 'ccp'):
            # Square lattice: second vector orthogonal to the first.
            self.r2.y = self.r1.x
        else:
            # Hexagonal lattice: second vector at 120 degrees.
            self.r2.x = self.r1.x * np.cos(2 * np.pi / 3)
            self.r2.y = self.r1.x * np.sin(2 * np.pi / 3)
            if self.bundle_packing is None:
                self._bundle_packing = 'hcp'
        if self.bundle_geometry == 'hexagon':
            # Build a hexagon row by row from the widest (odd) middle row.
            nrows = max(self.nx, self.ny, 3)
            if nrows % 2 != 1:
                nrows += 1
            ntubes_per_end_rows = int((nrows + 1) / 2)
            row = 0
            ntubes_per_row = nrows
            while ntubes_per_row >= ntubes_per_end_rows:
                if row == 0:
                    for n in range(ntubes_per_row):
                        dr = n * self.r1
                        self.bundle_coords.append(dr)
                else:
                    # Mirror each shorter row above and below the middle.
                    for nx in range(ntubes_per_row):
                        for ny in (-row, row):
                            dr = Vector()
                            dr.x = abs(ny * self.r2.x)
                            dr.y = ny * self.r2.y
                            dr = nx * self.r1 + dr
                            self.bundle_coords.append(dr)
                row += 1
                ntubes_per_row = nrows - row
        elif self.bundle_geometry == 'rectangle':
            Lx = 10 * self.Lx
            for nx in range(self.nx):
                for ny in range(self.ny):
                    dr = nx * self.r1 + ny * self.r2
                    # Wrap negative x (from the oblique r2) back into the cell.
                    while dr.x < 0:
                        dr.x += Lx
                    self.bundle_coords.append(dr)
        elif self.bundle_geometry == 'square':
            # Not implemented: no tubes are placed for this geometry.
            pass
        elif self.bundle_geometry == 'triangle':
            # Not implemented: no tubes are placed for this geometry.
            pass
        else:
            # Default: full nx-by-ny parallelogram of tubes.
            for nx in range(self.nx):
                for ny in range(self.ny):
                    dr = nx * self.r1 + ny * self.r2
                    self.bundle_coords.append(dr)
class NanotubeBundleBase(NanotubeBundleMixin):
    """Nanotube bundle structure base class."""
    _bundle_geometries = ['square', 'rectangle', 'hexagon']
    def __init__(self, *args, nx=1, ny=1, bundle_packing=None,
                 bundle_geometry=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.nx = nx
        self.ny = ny
        # Geometry is assigned before packing: the packing setter reads it.
        self.bundle_geometry = bundle_geometry
        self.bundle_packing = bundle_packing
        self.bundle_list = []
        self.generate_bundle_coords()
    def todict(self):
        """Return the attribute dict extended with bundle parameters."""
        attrdict = super().todict()
        attrdict.update(dict(nx=self.nx, ny=self.ny,
                             bundle_packing=self.bundle_packing,
                             bundle_geometry=self.bundle_geometry))
        return attrdict
| true | true |
f73398ac99bb6ad76208a2cc03425876fd1c766b | 521 | py | Python | app/tests.py | Sergey-59/magnit_test | f769642deed3d6c92b641a348311104c6bb23b93 | [
"Apache-2.0"
] | null | null | null | app/tests.py | Sergey-59/magnit_test | f769642deed3d6c92b641a348311104c6bb23b93 | [
"Apache-2.0"
] | null | null | null | app/tests.py | Sergey-59/magnit_test | f769642deed3d6c92b641a348311104c6bb23b93 | [
"Apache-2.0"
] | null | null | null | '''
Base test
'''
def test_index(client):
    """The unauthenticated root must redirect (e.g. to a login page)."""
    response = client.get('/')
    assert response.status_code == 302
def test_registration(client):
    """Register a user, reject an incomplete payload, then log in."""
    valid_user = {"email": "test4@gmail.com", "password": "12345", "name": "PyTest"}
    assert client.post('/registration', json=valid_user).status_code == 200
    # A payload missing the "email" field must be rejected.
    incomplete = {"password": "12345", "name": "PyTest"}
    assert client.post('/registration', json=incomplete).status_code == 400
    credentials = {"email": "test4@gmail.com", "password": "12345"}
    assert client.post('/login', json=credentials).status_code == 200
| 28.944444 | 115 | 0.591171 |
def test_index(client):
    # Unauthenticated root is expected to redirect.
    assert client.get('/').status_code == 302
def test_registration(client):
    """Register a user, reject an incomplete payload, then log in."""
    assert client.post('/registration',
                       json={"email": "test4@gmail.com", "password": "12345", "name": "PyTest"}).status_code == 200
    # Missing "email" field must be rejected with 400.
    assert client.post('/registration',
                       json={"password": "12345", "name": "PyTest"}).status_code == 400
    assert client.post('/login', json={"email": "test4@gmail.com", "password": "12345"}).status_code == 200
| true | true |
f73398fb3cd36cbd081f97b2634fad9d29ff25bc | 2,207 | py | Python | soccer/gameplay/plays/skel/binary_clock.py | kasohrab/robocup-software | 73c92878baf960844b5a4b34c72804093f1ea459 | [
"Apache-2.0"
] | null | null | null | soccer/gameplay/plays/skel/binary_clock.py | kasohrab/robocup-software | 73c92878baf960844b5a4b34c72804093f1ea459 | [
"Apache-2.0"
] | null | null | null | soccer/gameplay/plays/skel/binary_clock.py | kasohrab/robocup-software | 73c92878baf960844b5a4b34c72804093f1ea459 | [
"Apache-2.0"
] | null | null | null | import robocup
import constants
import play
import enum
import behavior
import main
import skills.move
import plays.testing.line_up
import time
# Maintains the state of the ball's position by keeping track of which
# half the ball is on and prints on both entering a given state and
# continuously during the execution of a given state.
class BinaryClock(play.Play):
    """Skeleton play: fill in states/transitions to track the ball's half.

    `self.current_time` captures the minute-of-hour at construction; the
    demo `on_enter_running` simply moves a robot to midfield.
    """

    class State(enum.Enum):
        # Define your states here.
        # eg: some_state = 0
        # -----------------------
        pass  # remove this once you have put in your states

    def __init__(self):
        super().__init__(continuous=True)

        # This is a local variable of this class
        # Refer to it with self.current_time
        self.current_time = time.localtime().tm_min

        # Register the states you defined using 'add_state'.
        # eg: self.add_state(WhichHalf.State.<???>,
        #                    behavior.Behavior.State.running)
        # ----------------------------------------------------

        # Add your state transitions using 'add_transition'.
        # eg: self.add_transition(behavior.Behavior.State.start,
        #                         self.State.<???>, lambda: True,
        #                         'immediately')
        # eg: self.add_transition(self.State.<???>, self.State.<???>,
        #                         lambda: <???>,
        #                         'state change message')
        # ------------------------------------------------------------

        # EXAMPLE TRANSITION, YOU MAY WANT TO REPLACE THIS
        self.add_transition(behavior.Behavior.State.start,
                            behavior.Behavior.State.running, lambda: True,
                            'immediately')

    # Define your own 'on_enter' and 'execute' functions here.
    # eg: def on_enter_<???>(self):
    #         print('Something?')
    # eg: def execute_<???>(self):
    #         print('Something?')
    # ---------------------------------------------------------

    # Demo of moving to a point.
    def on_enter_running(self):
        move_point = robocup.Point(0, constants.Field.Length / 2)
        self.add_subbehavior(skills.move.Move(move_point), 'test move')
| 37.40678 | 74 | 0.535569 | import robocup
import constants
import play
import enum
import behavior
import main
import skills.move
import plays.testing.line_up
import time
# half the ball is on and prints on both entering a given state and
# continuously during the execution of a given state.
class BinaryClock(play.Play):
    """Skeleton play: fill in states/transitions to track the ball's half.

    `self.current_time` captures the minute-of-hour at construction; the
    demo `on_enter_running` simply moves a robot to midfield.
    """

    class State(enum.Enum):
        # Define your states here.
        # eg: some_state = 0
        # -----------------------
        pass  # remove this once you have put in your states

    def __init__(self):
        super().__init__(continuous=True)

        # This is a local variable of this class
        # Refer to it with self.current_time
        self.current_time = time.localtime().tm_min

        # Register the states you defined using 'add_state'.
        # eg: self.add_state(WhichHalf.State.<???>,
        #                    behavior.Behavior.State.running)
        # ----------------------------------------------------

        # Add your state transitions using 'add_transition'.
        # eg: self.add_transition(behavior.Behavior.State.start,
        #                         self.State.<???>, lambda: True,
        #                         'immediately')
        # eg: self.add_transition(self.State.<???>, self.State.<???>,
        #                         lambda: <???>,
        #                         'state change message')
        # ------------------------------------------------------------

        # EXAMPLE TRANSITION, YOU MAY WANT TO REPLACE THIS
        self.add_transition(behavior.Behavior.State.start,
                            behavior.Behavior.State.running, lambda: True,
                            'immediately')

    # Define your own 'on_enter' and 'execute' functions here.
    # eg: def on_enter_<???>(self):
    #         print('Something?')
    # eg: def execute_<???>(self):
    #         print('Something?')
    # ---------------------------------------------------------

    # Demo of moving to a point.
    def on_enter_running(self):
        move_point = robocup.Point(0, constants.Field.Length / 2)
        self.add_subbehavior(skills.move.Move(move_point), 'test move')
| true | true |
f73399555c889d7db5b4869eb6411073d53546bb | 2,959 | py | Python | wtfml/data_loaders/image/classification.py | nagapavan525/wtfml | f2211addbe423a51b4dbbdec5a40d09649412452 | [
"MIT"
] | 1 | 2020-12-14T05:12:06.000Z | 2020-12-14T05:12:06.000Z | wtfml/data_loaders/image/classification.py | nagapavan525/wtfml | f2211addbe423a51b4dbbdec5a40d09649412452 | [
"MIT"
] | null | null | null | wtfml/data_loaders/image/classification.py | nagapavan525/wtfml | f2211addbe423a51b4dbbdec5a40d09649412452 | [
"MIT"
] | null | null | null | """
__author__: Abhishek Thakur
"""
import torch
import numpy as np
from PIL import Image
from PIL import ImageFile
try:
import torch_xla.core.xla_model as xm
_xla_available = True
except ImportError:
_xla_available = False
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ClassificationDataset:
    """Image-classification dataset backed by image files on disk."""

    def __init__(self, image_paths, targets, resize, augmentations=None):
        """
        :param image_paths: list of paths to images
        :param targets: numpy array
        :param resize: tuple or None
        :param augmentations: albumentations augmentations
        """
        self.image_paths = image_paths
        self.targets = targets
        self.resize = resize
        self.augmentations = augmentations

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, item):
        target = self.targets[item]
        image = Image.open(self.image_paths[item])
        if self.resize is not None:
            # PIL wants (width, height); `resize` is given as (height, width).
            new_size = (self.resize[1], self.resize[0])
            image = image.resize(new_size, resample=Image.BILINEAR)
        image = np.array(image)
        if self.augmentations is not None:
            image = self.augmentations(image=image)["image"]
        # HWC -> CHW layout for PyTorch.
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        return {
            "image": torch.tensor(image),
            "targets": torch.tensor(target),
        }
class ClassificationDataLoader:
    """Factory wrapping a :class:`ClassificationDataset` in a torch DataLoader."""

    def __init__(self, image_paths, targets, resize, augmentations=None):
        """
        :param image_paths: list of paths to images
        :param targets: numpy array
        :param resize: tuple or None
        :param augmentations: albumentations augmentations
        """
        self.image_paths = image_paths
        self.targets = targets
        self.resize = resize
        self.augmentations = augmentations
        self.dataset = ClassificationDataset(
            image_paths=image_paths,
            targets=targets,
            resize=resize,
            augmentations=augmentations,
        )

    def fetch(self, batch_size, num_workers, drop_last=False, shuffle=True, tpu=False):
        """
        :param batch_size: batch size
        :param num_workers: number of processes to use
        :param drop_last: drop the last batch?
        :param shuffle: True/False
        :param tpu: True/False, to use tpu or not
        """
        if tpu:
            # Distributed sampler so each TPU core sees a distinct shard.
            sampler = torch.utils.data.distributed.DistributedSampler(
                self.dataset,
                num_replicas=xm.xrt_world_size(),
                rank=xm.get_ordinal(),
                shuffle=shuffle,
            )
        else:
            sampler = None
        return torch.utils.data.DataLoader(
            self.dataset,
            batch_size=batch_size,
            sampler=sampler,
            drop_last=drop_last,
            num_workers=num_workers,
        )
| 29.59 | 87 | 0.607638 |
import torch
import numpy as np
from PIL import Image
from PIL import ImageFile
try:
import torch_xla.core.xla_model as xm
_xla_available = True
except ImportError:
_xla_available = False
ImageFile.LOAD_TRUNCATED_IMAGES = True
class ClassificationDataset:
    """Image-classification dataset backed by image files on disk."""
    def __init__(self, image_paths, targets, resize, augmentations=None):
        """
        :param image_paths: list of paths to images
        :param targets: numpy array
        :param resize: tuple (height, width) or None
        :param augmentations: albumentations augmentations
        """
        self.image_paths = image_paths
        self.targets = targets
        self.resize = resize
        self.augmentations = augmentations
    def __len__(self):
        # Dataset size equals the number of image paths.
        return len(self.image_paths)
    def __getitem__(self, item):
        """Load, optionally resize/augment one image; return torch tensors."""
        image = Image.open(self.image_paths[item])
        targets = self.targets[item]
        if self.resize is not None:
            # PIL wants (width, height); `resize` is given as (height, width).
            image = image.resize(
                (self.resize[1], self.resize[0]), resample=Image.BILINEAR
            )
        image = np.array(image)
        if self.augmentations is not None:
            augmented = self.augmentations(image=image)
            image = augmented["image"]
        # HWC -> CHW layout for PyTorch.
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        return {
            "image": torch.tensor(image),
            "targets": torch.tensor(targets),
        }
class ClassificationDataLoader:
    """Factory wrapping a ClassificationDataset in a torch DataLoader."""
    def __init__(self, image_paths, targets, resize, augmentations=None):
        """
        :param image_paths: list of paths to images
        :param targets: numpy array
        :param resize: tuple (height, width) or None
        :param augmentations: albumentations augmentations
        """
        self.image_paths = image_paths
        self.targets = targets
        self.resize = resize
        self.augmentations = augmentations
        self.dataset = ClassificationDataset(
            image_paths=self.image_paths,
            targets=self.targets,
            resize=self.resize,
            augmentations=self.augmentations,
        )
    def fetch(self, batch_size, num_workers, drop_last=False, shuffle=True, tpu=False):
        """Build a DataLoader; with tpu=True, shard via a DistributedSampler."""
        sampler = None
        if tpu:
            # One shard per TPU core (torch_xla world size / ordinal).
            sampler = torch.utils.data.distributed.DistributedSampler(
                self.dataset,
                num_replicas=xm.xrt_world_size(),
                rank=xm.get_ordinal(),
                shuffle=shuffle,
            )
        data_loader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=batch_size,
            sampler=sampler,
            drop_last=drop_last,
            num_workers=num_workers,
        )
        return data_loader
| true | true |
f73399f9760977ca6b0406f171a5dc7217817bae | 394 | py | Python | lagtraj/forcings/conversion/targets/__init__.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | [
"MIT"
] | 4 | 2020-04-16T22:57:00.000Z | 2021-10-05T02:37:58.000Z | lagtraj/forcings/conversion/targets/__init__.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | [
"MIT"
] | 112 | 2020-05-21T09:47:14.000Z | 2022-03-20T16:00:27.000Z | lagtraj/forcings/conversion/targets/__init__.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | [
"MIT"
] | 5 | 2020-05-14T11:04:07.000Z | 2022-03-11T16:38:35.000Z | import os
import glob
import importlib
def _package_contents():
for path in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")):
path = os.path.basename(path)
if not path.startswith("_"):
module_name = path.replace(".py", "")
yield module_name, importlib.import_module(f"{__package__}.{module_name}")
available = dict(_package_contents())
| 26.266667 | 86 | 0.659898 | import os
import glob
import importlib
def _package_contents():
for path in glob.glob(os.path.join(os.path.dirname(__file__), "*.py")):
path = os.path.basename(path)
if not path.startswith("_"):
module_name = path.replace(".py", "")
yield module_name, importlib.import_module(f"{__package__}.{module_name}")
available = dict(_package_contents())
| true | true |
f7339a9f25e4749bcece34bebe912861f3ed0139 | 89 | py | Python | lhrhost/robot/__init__.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/__init__.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | null | null | null | lhrhost/robot/__init__.py | ethanjli/liquid-handling-robotics | 999ab03c225b4c5382ab9fcac6a4988d0c232c67 | [
"BSD-3-Clause"
] | 1 | 2018-08-03T17:17:31.000Z | 2018-08-03T17:17:31.000Z | """Higher-level abstractions for robot control."""
from lhrhost.robot.robot import Robot
| 29.666667 | 50 | 0.786517 | from lhrhost.robot.robot import Robot
| true | true |
f7339b5e447635f91727aa3325317aed927ac459 | 1,255 | py | Python | ow_lander/scripts/constants.py | nasa/ow_simulator | 662fea6bf83d82e1b0aac69d05c16dee77cd71a5 | [
"NASA-1.3"
] | 97 | 2020-08-10T08:43:14.000Z | 2022-03-21T21:14:15.000Z | ow_lander/scripts/constants.py | AliMuhammadOfficial/ow_simulator | e0c96d74c1f3dea1451c90782172a10cfe183d94 | [
"NASA-1.3"
] | 153 | 2020-08-11T22:37:25.000Z | 2022-03-31T23:29:41.000Z | ow_lander/scripts/constants.py | AliMuhammadOfficial/ow_simulator | e0c96d74c1f3dea1451c90782172a10cfe183d94 | [
"NASA-1.3"
] | 26 | 2020-08-06T17:07:03.000Z | 2022-03-16T01:04:01.000Z | #!/usr/bin/env python2
# The Notices and Disclaimers for Ocean Worlds Autonomy Testbed for Exploration
# Research and Simulation can be found in README.md in the root directory of
# this repository.
## GLOBAL VARS ##
J_SCOOP_YAW = 5
J_HAND_YAW = 4
J_DIST_PITCH = 3
J_PROX_PITCH = 2
J_SHOU_PITCH = 1
J_SHOU_YAW = 0
J_GRINDER = 5
X_SHOU = 0.79
Y_SHOU = 0.175
HAND_Y_OFFSET = 0.0249979319838
SCOOP_OFFSET = 0.215
GRINDER_OFFSET = 0.16
# Distance between scoop center of mass and lower blade
SCOOP_HEIGHT = 0.076
DEFAULT_GROUND_HEIGHT = -0.155
X_DELIV = 0.2
Y_DELIV = 0.2
Z_DELIV = 1.2
SHOU_YAW_DELIV = 0.4439
GUARD_FILTER_AV_WIDTH = 10
# Multiply the slope on the first 10 ticks of the guarded move by this coeff to obtain threshold
GUARD_MAX_SLOPE_BEFORE_CONTACT_COEFF = 5
TRAJ_PUB_RATE = 10
NB_ARM_LINKS = 7
# Distance between center or mass of the scoop and center of rotation in l_wrist
ROT_RADIUS = 0.36
# Distance between wrist center of mass and scoop center of mass
# Component parallel to ground
WRIST_SCOOP_PARAL = 0.2
# Component perperdicular to ground
WRIST_SCOOP_PERP = 0.3
# Radii in dig_circular
R_PARALLEL_TRUE = 0.46
R_PARALLEL_FALSE = 0.25
# Radii in dig_circular for actions
R_PARALLEL_TRUE_A = 0.46
R_PARALLEL_FALSE_A = 0.10
| 22.818182 | 96 | 0.776892 |
HAND_YAW = 4
J_DIST_PITCH = 3
J_PROX_PITCH = 2
J_SHOU_PITCH = 1
J_SHOU_YAW = 0
J_GRINDER = 5
X_SHOU = 0.79
Y_SHOU = 0.175
HAND_Y_OFFSET = 0.0249979319838
SCOOP_OFFSET = 0.215
GRINDER_OFFSET = 0.16
SCOOP_HEIGHT = 0.076
DEFAULT_GROUND_HEIGHT = -0.155
X_DELIV = 0.2
Y_DELIV = 0.2
Z_DELIV = 1.2
SHOU_YAW_DELIV = 0.4439
GUARD_FILTER_AV_WIDTH = 10
GUARD_MAX_SLOPE_BEFORE_CONTACT_COEFF = 5
TRAJ_PUB_RATE = 10
NB_ARM_LINKS = 7
ROT_RADIUS = 0.36
WRIST_SCOOP_PARAL = 0.2
WRIST_SCOOP_PERP = 0.3
R_PARALLEL_TRUE = 0.46
R_PARALLEL_FALSE = 0.25
R_PARALLEL_TRUE_A = 0.46
R_PARALLEL_FALSE_A = 0.10
| true | true |
f7339ba9a7dc732eebf511630a68c4deab31743e | 914 | py | Python | pyblis/tests/utils.py | jcrist/pyblis | d9c67d40a15c656a4681ba1b9ca0c52eff40163c | [
"BSD-3-Clause"
] | 2 | 2020-03-07T14:02:51.000Z | 2021-02-03T05:18:11.000Z | pyblis/tests/utils.py | jcrist/pyblis | d9c67d40a15c656a4681ba1b9ca0c52eff40163c | [
"BSD-3-Clause"
] | null | null | null | pyblis/tests/utils.py | jcrist/pyblis | d9c67d40a15c656a4681ba1b9ca0c52eff40163c | [
"BSD-3-Clause"
] | null | null | null | import pytest
import numpy as np
all_dtypes = pytest.mark.parametrize('dtype', ['f4', 'f8', 'c8', 'c16'])
class Base(object):
def rand(self, dtype, shape=()):
a = np.random.normal(size=shape).astype(dtype)
if np.issubdtype(dtype, np.complexfloating):
a += np.random.normal(size=a.shape) * 1j
return a if a.shape else a.reshape((1,))[0]
def call_base(self, *args, **kwargs):
return self.call(*args, **kwargs)
class NumbaMixin(object):
@property
def error_cls(self):
import numba
return numba.errors.TypingError
@classmethod
def setup_class(cls):
base, full = cls.compile()
cls.base = staticmethod(base)
cls.full = staticmethod(full)
def call(self, *args, **kwargs):
return self.full(*args, **kwargs)
def call_base(self, *args, **kwargs):
return self.base(*args, **kwargs)
| 24.702703 | 72 | 0.608315 | import pytest
import numpy as np
all_dtypes = pytest.mark.parametrize('dtype', ['f4', 'f8', 'c8', 'c16'])
class Base(object):
def rand(self, dtype, shape=()):
a = np.random.normal(size=shape).astype(dtype)
if np.issubdtype(dtype, np.complexfloating):
a += np.random.normal(size=a.shape) * 1j
return a if a.shape else a.reshape((1,))[0]
def call_base(self, *args, **kwargs):
return self.call(*args, **kwargs)
class NumbaMixin(object):
@property
def error_cls(self):
import numba
return numba.errors.TypingError
@classmethod
def setup_class(cls):
base, full = cls.compile()
cls.base = staticmethod(base)
cls.full = staticmethod(full)
def call(self, *args, **kwargs):
return self.full(*args, **kwargs)
def call_base(self, *args, **kwargs):
return self.base(*args, **kwargs)
| true | true |
f7339bfcce133685fc20bcc3937e577b436a7a84 | 822 | py | Python | application/DemandSideNew/Building/DemandProfile.py | FrancisDinh/Smart-Energy-Project | 16b021e127d9ac5c01653abc31d8cc5d0a7a05c6 | [
"MIT"
] | null | null | null | application/DemandSideNew/Building/DemandProfile.py | FrancisDinh/Smart-Energy-Project | 16b021e127d9ac5c01653abc31d8cc5d0a7a05c6 | [
"MIT"
] | 4 | 2021-06-02T00:34:13.000Z | 2021-06-02T00:35:28.000Z | application/DemandSideNew/Building/DemandProfile.py | FrancisDinh/Smart-Energy-Project | 16b021e127d9ac5c01653abc31d8cc5d0a7a05c6 | [
"MIT"
] | null | null | null | import os, sys
import json
import os.path
import numpy
class DemandProfile:
def __init__(self):
cwd = os.getcwd()
self.fname = cwd + '/demand-profile.json'
def get_data(self):
demand={}
with open(self.fname) as demand_info:
demand = json.load(demand_info)
return demand
def calculate_total_demand(self):
data = self.get_data()
total_energy_data=[]
num=0
total_demand = numpy.zeros(24)
for i in data:
value = i[str(1+num)]["Circulation Pump"]+i[str(1+num)]["Dish Washer"]+i[str(1+num)]["Freezer"]+i[str(1+num)]["Washing Machine"]
total_demand[num] = value
num+=1
return total_demand
#sample object
#sample = DemandProfile()
#print(sample.calculate_total_demand()) | 27.4 | 140 | 0.600973 | import os, sys
import json
import os.path
import numpy
class DemandProfile:
def __init__(self):
cwd = os.getcwd()
self.fname = cwd + '/demand-profile.json'
def get_data(self):
demand={}
with open(self.fname) as demand_info:
demand = json.load(demand_info)
return demand
def calculate_total_demand(self):
data = self.get_data()
total_energy_data=[]
num=0
total_demand = numpy.zeros(24)
for i in data:
value = i[str(1+num)]["Circulation Pump"]+i[str(1+num)]["Dish Washer"]+i[str(1+num)]["Freezer"]+i[str(1+num)]["Washing Machine"]
total_demand[num] = value
num+=1
return total_demand
| true | true |
f7339c3e2aaf49ae2d4dd0b7a9e21662f14c3370 | 1,355 | py | Python | src/senjyu/ml/clustering/kmeans.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | src/senjyu/ml/clustering/kmeans.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | src/senjyu/ml/clustering/kmeans.py | Koukyosyumei/Senjyu | 70faa45e13cb3b1ccdee8a40146a03d60abe11e5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from mpi4py import MPI
class Kmeans:
def __init__(self, k=3, num_iterations=100, seed=42):
self.k = k
self.num_iterations = num_iterations
self.centorids = None
self.dim = None
self.n = None
np.random.seed(seed)
def train(self, X, parallel=False):
if parallel:
pass
else:
return self._train_standalone(X)
def _init_distiution(self, args=None):
self.args = args
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
def _em_standalone(self, X):
# E-step
distance = np.zeros((self.k, self.n))
for cluster_id in range(self.k):
distance[cluster_id, :] = np.linalg.norm(
X - self.centorids[cluster_id, :], axis=1
)
pred = np.argmin(distance, axis=0)
# M-step
for cluster_id in range(self.k):
self.centorids[cluster_id, :] = np.mean(X[pred == cluster_id, :], axis=0)
return pred
def _train_standalone(self, X):
self.n = X.shape[0]
self.dim = X.shape[1]
self.centorids = np.random.normal(0, 1, (self.k, self.dim))
for _ in range(self.num_iterations):
pred = self._em_standalone(X)
return pred
| 26.568627 | 85 | 0.563838 | import numpy as np
from mpi4py import MPI
class Kmeans:
def __init__(self, k=3, num_iterations=100, seed=42):
self.k = k
self.num_iterations = num_iterations
self.centorids = None
self.dim = None
self.n = None
np.random.seed(seed)
def train(self, X, parallel=False):
if parallel:
pass
else:
return self._train_standalone(X)
def _init_distiution(self, args=None):
self.args = args
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
def _em_standalone(self, X):
distance = np.zeros((self.k, self.n))
for cluster_id in range(self.k):
distance[cluster_id, :] = np.linalg.norm(
X - self.centorids[cluster_id, :], axis=1
)
pred = np.argmin(distance, axis=0)
for cluster_id in range(self.k):
self.centorids[cluster_id, :] = np.mean(X[pred == cluster_id, :], axis=0)
return pred
def _train_standalone(self, X):
self.n = X.shape[0]
self.dim = X.shape[1]
self.centorids = np.random.normal(0, 1, (self.k, self.dim))
for _ in range(self.num_iterations):
pred = self._em_standalone(X)
return pred
| true | true |
f7339c847db5d1e76e116f1c088045635c3233e4 | 6,571 | py | Python | xalpha/realtime.py | Aaron-YunZhao/xalpha | 76dc6390cb5714b1c004f7e79e4af832ad1e6fa5 | [
"MIT"
] | 1 | 2020-03-15T01:48:52.000Z | 2020-03-15T01:48:52.000Z | xalpha/realtime.py | tersapp/xalpha | 76dc6390cb5714b1c004f7e79e4af832ad1e6fa5 | [
"MIT"
] | null | null | null | xalpha/realtime.py | tersapp/xalpha | 76dc6390cb5714b1c004f7e79e4af832ad1e6fa5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
module for realtime watch and notfication
"""
import datetime as dt
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import formataddr, parseaddr
from re import match
import pandas as pd
from xalpha.cons import today
from xalpha.info import _download, fundinfo
from xalpha.trade import trade
def _format_addr(s):
"""
parse the email sender and receiver, Chinese encode and support
:param s: eg. 'name <email@website.com>, name2 <email2@web2.com>'
"""
name, addr = parseaddr(s)
return formataddr((Header(name, "utf-8").encode(), addr))
def mail(
title,
content,
sender=None,
receiver=None,
password=None,
server=None,
port=None,
sender_name="sender",
receiver_name=None,
):
"""
send email
:param title: str, title of the email
:param content: str, content of the email, plain text only
:param conf: all other paramters can be import as a dictionay, eg.conf = {'sender': 'aaa@bb.com',
'sender_name':'name', 'receiver':['aaa@bb.com','ccc@dd.com'], 'password':'123456',
'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.
The receiver_name and sender_name options can be omitted.
"""
ret = True
try:
if receiver_name is None:
receiver_name = ["receiver" for _ in receiver]
msg = MIMEText(content, "plain", "utf-8")
msg["From"] = _format_addr("%s <%s>" % (sender_name, sender))
# 括号里的对应发件人邮箱昵称、发件人邮箱账号
receivestr = ""
for i, s in enumerate(receiver):
receivestr += receiver_name[i]
receivestr += " <"
receivestr += s
receivestr += ">, "
msg["To"] = _format_addr(receivestr) # 括号里的对应收件人邮箱昵称、收件人邮箱账号
msg["Subject"] = title # 邮件的主题,即标题
server = smtplib.SMTP_SSL(server, port) # 发件人邮箱中的SMTP服务器和端口号
server.login(sender, password) # 括号中对应的是发件人邮箱账号、邮箱密码
server.sendmail(
sender, receiver, msg.as_string()
) # 括号中对应的是发件人邮箱账号、收件人邮箱账号、发送邮件
server.quit()
except Exception:
ret = False
return ret
class rtdata:
"""
get real time data of specific funds
:param code: string of six digitals for funds
"""
def __init__(self, code):
url = "http://fundgz.1234567.com.cn/js/" + code + ".js"
page = _download(url)
self.code = code
self.rtvalue = float(match(r'.*"gsz":"(\d*\.\d*)",.*', page.text)[1])
self.name = match(r'.*"name":"([^,]*)",.*', page.text)[1]
self.time = dt.datetime.strptime(
match(r'.*"gztime":"([\d\s\-\:]*)".*', page.text)[1], "%Y-%m-%d %H:%M"
)
def rfundinfo(
code, round_label=0, dividend_label=0, fetch=False, save=False, path="", form="csv"
):
"""
give a fundinfo object with todays estimate netvalue at running time
:param code: string of six digitals for funds
:param fetch: boolean, when open the fetch option, info class will try fetching from local files first in the init
:param save: boolean, when open the save option, info classes automatically save the class to files
:param path: string, the file path prefix of IO
:param form: string, the format of IO, options including: 'csv'
:returns: the fundinfo object
"""
fundobj = fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
)
rt = rtdata(code)
rtdate = dt.datetime.combine(rt.time, dt.time.min)
rtvalue = rt.rtvalue
if (rtdate - fundobj.price.iloc[-1].date).days > 0:
fundobj.price = fundobj.price.append(
pd.DataFrame(
[[rtdate, rtvalue, fundobj.price.iloc[-1].totvalue, 0]],
columns=["date", "netvalue", "totvalue", "comment"],
),
ignore_index=True,
)
return fundobj
class review:
"""
review policys and give the realtime purchase suggestions
:param policylist: list of policy object
:param namelist: list of names of corresponding policy, default as 0 to n-1
:param date: object of datetime, check date, today is prefered, date other than is not guaranteed
"""
def __init__(self, policylist, namelist=None, date=today()):
self.warn = []
self.message = []
self.policylist = policylist
if namelist is None:
self.namelist = [i for i in range(len(policylist))]
else:
self.namelist = namelist
assert len(self.policylist) == len(self.namelist)
for i, policy in enumerate(policylist):
row = policy.status[policy.status["date"] == date]
if len(row) == 1:
warn = (
policy.aim.name,
policy.aim.code,
row.iloc[0].loc[policy.aim.code],
self.namelist[i],
)
self.warn.append(warn)
if warn[2] > 0:
sug = "买入%s元" % warn[2]
elif warn[2] < 0:
ratio = -warn[2] / 0.005 * 100
share = (
trade(fundinfo(warn[1]), policy.status)
.briefdailyreport()
.get("currentshare", 0)
)
share = -warn[2] / 0.005 * share
sug = "卖出%s%%的份额,也即%s份额" % (ratio, share)
self.message.append(
"根据%s计划,建议%s,%s(%s)" % (warn[3], sug, warn[0], warn[1])
)
self.content = "\n".join(map(str, self.message))
def __str__(self):
return self.content
def notification(self, conf):
"""
send email of self.content, at least support for qq email sender
:param conf: the configuration dictionary for email send settings, no ** before the dict in needed.
eg.conf = {'sender': 'aaa@bb.com',
'sender_name':'name', 'receiver':['aaa@bb.com','ccc@dd.com'], 'password':'123456',
'server':'smtp.bb.com','port':123, 'receiver_name':['me','guest']}.
The receiver_name and sender_name options can be omitted.
"""
if self.content:
ret = mail("Notification", self.content, **conf)
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
else:
print("没有提醒待发送")
| 33.35533 | 118 | 0.564906 |
import datetime as dt
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from email.utils import formataddr, parseaddr
from re import match
import pandas as pd
from xalpha.cons import today
from xalpha.info import _download, fundinfo
from xalpha.trade import trade
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, "utf-8").encode(), addr))
def mail(
title,
content,
sender=None,
receiver=None,
password=None,
server=None,
port=None,
sender_name="sender",
receiver_name=None,
):
ret = True
try:
if receiver_name is None:
receiver_name = ["receiver" for _ in receiver]
msg = MIMEText(content, "plain", "utf-8")
msg["From"] = _format_addr("%s <%s>" % (sender_name, sender))
receivestr = ""
for i, s in enumerate(receiver):
receivestr += receiver_name[i]
receivestr += " <"
receivestr += s
receivestr += ">, "
msg["To"] = _format_addr(receivestr)
msg["Subject"] = title
server = smtplib.SMTP_SSL(server, port)
server.login(sender, password)
server.sendmail(
sender, receiver, msg.as_string()
)
server.quit()
except Exception:
ret = False
return ret
class rtdata:
def __init__(self, code):
url = "http://fundgz.1234567.com.cn/js/" + code + ".js"
page = _download(url)
self.code = code
self.rtvalue = float(match(r'.*"gsz":"(\d*\.\d*)",.*', page.text)[1])
self.name = match(r'.*"name":"([^,]*)",.*', page.text)[1]
self.time = dt.datetime.strptime(
match(r'.*"gztime":"([\d\s\-\:]*)".*', page.text)[1], "%Y-%m-%d %H:%M"
)
def rfundinfo(
code, round_label=0, dividend_label=0, fetch=False, save=False, path="", form="csv"
):
fundobj = fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
)
rt = rtdata(code)
rtdate = dt.datetime.combine(rt.time, dt.time.min)
rtvalue = rt.rtvalue
if (rtdate - fundobj.price.iloc[-1].date).days > 0:
fundobj.price = fundobj.price.append(
pd.DataFrame(
[[rtdate, rtvalue, fundobj.price.iloc[-1].totvalue, 0]],
columns=["date", "netvalue", "totvalue", "comment"],
),
ignore_index=True,
)
return fundobj
class review:
def __init__(self, policylist, namelist=None, date=today()):
self.warn = []
self.message = []
self.policylist = policylist
if namelist is None:
self.namelist = [i for i in range(len(policylist))]
else:
self.namelist = namelist
assert len(self.policylist) == len(self.namelist)
for i, policy in enumerate(policylist):
row = policy.status[policy.status["date"] == date]
if len(row) == 1:
warn = (
policy.aim.name,
policy.aim.code,
row.iloc[0].loc[policy.aim.code],
self.namelist[i],
)
self.warn.append(warn)
if warn[2] > 0:
sug = "买入%s元" % warn[2]
elif warn[2] < 0:
ratio = -warn[2] / 0.005 * 100
share = (
trade(fundinfo(warn[1]), policy.status)
.briefdailyreport()
.get("currentshare", 0)
)
share = -warn[2] / 0.005 * share
sug = "卖出%s%%的份额,也即%s份额" % (ratio, share)
self.message.append(
"根据%s计划,建议%s,%s(%s)" % (warn[3], sug, warn[0], warn[1])
)
self.content = "\n".join(map(str, self.message))
def __str__(self):
return self.content
def notification(self, conf):
if self.content:
ret = mail("Notification", self.content, **conf)
if ret:
print("邮件发送成功")
else:
print("邮件发送失败")
else:
print("没有提醒待发送")
| true | true |
f7339d1d513d9c652ded4f4a4dc0b3fea224e681 | 781 | py | Python | project_euler/ex25_1000digit_fib_number.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | project_euler/ex25_1000digit_fib_number.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | project_euler/ex25_1000digit_fib_number.py | ralphribeiro/uri-projecteuler | 7151d86e014aea9c56026cc88f50b4e940117dd8 | [
"MIT"
] | null | null | null | """
The Fibonacci sequence is defined by the recurrence relation:
Fn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.
Hence the first 12 terms will be:
F1 = 1
F2 = 1
F3 = 2
F4 = 3
F5 = 5
F6 = 8
F7 = 13
F8 = 21
F9 = 34
F10 = 55
F11 = 89
F12 = 144
The 12th term, F12, is the first term to contain three digits.
What is the index of the first term in the Fibonacci sequence to contain
1000 digits?
"""
def fib():
last = 1
penultimate = 1
yield last
yield penultimate
while True:
ret = last + penultimate
penultimate = last
yield ret
last = ret
f = fib()
index = 1
while True:
ret = next(f)
ret_list = [n for n in str(ret)]
if len(ret_list) > 999:
print(index, ret)
break
index += 1 | 16.617021 | 76 | 0.583867 |
def fib():
last = 1
penultimate = 1
yield last
yield penultimate
while True:
ret = last + penultimate
penultimate = last
yield ret
last = ret
f = fib()
index = 1
while True:
ret = next(f)
ret_list = [n for n in str(ret)]
if len(ret_list) > 999:
print(index, ret)
break
index += 1 | true | true |
f7339d56480d376c52eba03d3815e0df05ad5d71 | 619 | py | Python | GMXToPython.py | Karuji/GMProjectImporter | 2e810dcaf740304550a82315e720ad39cdbc4fe7 | [
"MIT"
] | null | null | null | GMXToPython.py | Karuji/GMProjectImporter | 2e810dcaf740304550a82315e720ad39cdbc4fe7 | [
"MIT"
] | null | null | null | GMXToPython.py | Karuji/GMProjectImporter | 2e810dcaf740304550a82315e720ad39cdbc4fe7 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import os
from Element import Element
class GMXToPython(object):
def __init__(self, xmlFile):
self.gmxroot = ET.parse(xmlFile).getroot()
self.root = Element(self.gmxroot)
for child in self.gmxroot:
self.process(child, self.root)
def process(self, element, parent):
elem = Element(element)
elem.parent = parent
parent.children.append(elem)
elem.generation = parent.generation +1
elem.generateCleanText()
if elem.parent == self.root:
elem.primogen = elem.tag
else:
elem.primogen = parent.primogen
for child in element:
self.process(child, elem)
| 22.925926 | 44 | 0.728595 | import xml.etree.ElementTree as ET
import os
from Element import Element
class GMXToPython(object):
def __init__(self, xmlFile):
self.gmxroot = ET.parse(xmlFile).getroot()
self.root = Element(self.gmxroot)
for child in self.gmxroot:
self.process(child, self.root)
def process(self, element, parent):
elem = Element(element)
elem.parent = parent
parent.children.append(elem)
elem.generation = parent.generation +1
elem.generateCleanText()
if elem.parent == self.root:
elem.primogen = elem.tag
else:
elem.primogen = parent.primogen
for child in element:
self.process(child, elem)
| true | true |
f7339e8916132c9d410b936f39152a5243dc3a95 | 13,341 | py | Python | convlab/modules/e2e/multiwoz/Mem2Seq/utils/utils_babi_mem2seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 405 | 2019-06-17T05:38:47.000Z | 2022-03-29T15:16:51.000Z | convlab/modules/e2e/multiwoz/Mem2Seq/utils/utils_babi_mem2seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 69 | 2019-06-20T22:57:41.000Z | 2022-03-04T12:12:07.000Z | convlab/modules/e2e/multiwoz/Mem2Seq/utils/utils_babi_mem2seq.py | ngduyanhece/ConvLab | a04582a77537c1a706fbf64715baa9ad0be1301a | [
"MIT"
] | 124 | 2019-06-17T05:11:23.000Z | 2021-12-31T05:58:18.000Z | # Modified by Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch
import torch.utils.data as data
from torch.autograd import Variable
from utils.config import *
from utils.until_temp import entityList
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
MEM_TOKEN_SIZE = 3
class Lang:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {UNK_token: 'UNK', PAD_token: "PAD", EOS_token: "EOS", SOS_token: "SOS"}
self.n_words = 4 # Count default tokens
def index_words(self, story, trg=False):
if trg:
for word in story.split(' '):
self.index_word(word)
else:
for word_triple in story:
for word in word_triple:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
class Dataset(data.Dataset):
"""Custom data.Dataset compatible with data.DataLoader."""
def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):
"""Reads source and target sequences from txt files."""
self.src_seqs = src_seq
self.trg_seqs = trg_seq
self.index_seqs = index_seq
self.gate_seq = gate_seq
self.num_total_seqs = len(self.src_seqs)
self.src_word2id = src_word2id
self.trg_word2id = trg_word2id
self.max_len = max_len
self.conv_seq = conv_seq
self.ent = ent
self.ID = ID
self.kb_arr = kb_arr
def __getitem__(self, index):
"""Returns one data pair (source and target)."""
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
index_s = self.index_seqs[index]
gete_s = self.gate_seq[index]
src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)
trg_seq = self.preprocess(trg_seq, self.trg_word2id)
index_s = self.preprocess_inde(index_s,src_seq)
gete_s = self.preprocess_gate(gete_s)
conv_seq = self.conv_seq[index]
conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)
ID = self.ID[index]
kb_arr = self.kb_arr[index]
return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence, word2id, trg=True):
"""Converts words to ids."""
if trg:
story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]
else:
story = []
for i, word_triple in enumerate(sequence):
story.append([])
for ii, word in enumerate(word_triple):
temp = word2id[word] if word in word2id else UNK_token
story[i].append(temp)
try:
story = torch.Tensor(story)
except:
print(sequence)
print(story)
return story
def preprocess_inde(self, sequence, src_seq):
"""Converts words to ids."""
sequence = sequence + [len(src_seq)-1]
sequence = torch.Tensor(sequence)
return sequence
def preprocess_gate(self, sequence):
"""Converts words to ids."""
sequence = sequence + [0]
sequence = torch.Tensor(sequence)
return sequence
def collate_fn(data):
def merge(sequences,max_len):
lengths = [len(seq) for seq in sequences]
if (max_len):
padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i,:end,:] = seq[:end]
else:
padded_seqs = torch.ones(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
# sort a list by sequence length (descending order) to use pack_padded_sequence
data.sort(key=lambda x: len(x[0]), reverse=True)
# seperate source and target sequences
src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)
# merge sequences (from tuple of 1D tensor to 2D tensor)
src_seqs, src_lengths = merge(src_seqs,max_len)
trg_seqs, trg_lengths = merge(trg_seqs,None)
ind_seqs, _ = merge(ind_seqs,None)
gete_s, _ = merge(gete_s,None)
conv_seqs, conv_lengths = merge(conv_seq, max_len)
src_seqs = Variable(src_seqs).transpose(0,1)
trg_seqs = Variable(trg_seqs).transpose(0,1)
ind_seqs = Variable(ind_seqs).transpose(0,1)
gete_s = Variable(gete_s).transpose(0,1)
conv_seqs = Variable(conv_seqs).transpose(0,1)
if USE_CUDA:
src_seqs = src_seqs.cuda()
trg_seqs = trg_seqs.cuda()
ind_seqs = ind_seqs.cuda()
gete_s = gete_s.cuda()
conv_seqs = conv_seqs.cuda()
return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr
def read_langs(file_name, entity, max_line = None):
logging.info(("Reading lines from {}".format(file_name)))
data=[]
contex_arr = []
conversation_arr = []
kb_arr = []
u=None
r=None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_name) as fin:
cnt_ptr = 0
cnt_voc = 0
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
if '\t' in line:
u, r = line.split('\t')
if u!='<SILENCE>': user_counter += 1
system_counter += 1
gen_u = generate_memory(u, "$u", str(time_counter))
contex_arr += gen_u
conversation_arr += gen_u
r_index = []
gate = []
for key in r.split(' '):
if ENTPTR:
if (key in entity):
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
cnt_voc +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
else:
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
r_index.append(index)
system_res_counter += 1
if len(r_index) > max_r_len:
max_r_len = len(r_index)
contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]
ent = []
for key in r.split(' '):
if(key in entity):
ent.append(key)
data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])
gen_r = generate_memory(r, "$s", str(time_counter))
contex_arr += gen_r
conversation_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r=line
if USEKB:
temp = generate_memory(r, "", "")
contex_arr += temp
kb_arr += temp
else:
cnt_lin+=1
if(max_line and cnt_lin>=max_line):
break
contex_arr=[]
conversation_arr = []
kb_arr = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d[0]) for d in data])
logging.info("Pointer percentace= {} ".format(cnt_ptr/(cnt_ptr+cnt_voc)))
logging.info("Max responce Len: {}".format(max_r_len))
logging.info("Max Input Len: {}".format(max_len))
logging.info("Avg. User Utterances: {}".format(user_counter*1.0/dialog_counter))
logging.info("Avg. Bot Utterances: {}".format(system_counter*1.0/dialog_counter))
logging.info("Avg. KB results: {}".format(KB_counter*1.0/dialog_counter))
logging.info("Avg. responce Len: {}".format(system_res_counter*1.0/system_counter))
print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])
return data, max_len, max_r_len
def generate_memory(sent, speaker, time):
sent_new = []
sent_token = sent.split(' ')
if speaker=="$u" or speaker=="$s":
for word in sent_token:
temp = [word, speaker, 't'+str(time)] + ["PAD"]*(MEM_TOKEN_SIZE-3)
sent_new.append(temp)
else:
if sent_token[1]=="R_rating":
sent_token = sent_token + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
else:
sent_token = sent_token[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
sent_new.append(sent_token)
return sent_new
def get_seq(pairs,lang,batch_size,type,max_len):
x_seq = []
y_seq = []
ptr_seq = []
gate_seq = []
conv_seq = []
ent = []
ID = []
kb_arr = []
for pair in pairs:
x_seq.append(pair[0])
y_seq.append(pair[1])
ptr_seq.append(pair[2])
gate_seq.append(pair[3])
conv_seq.append(pair[4])
ent.append(pair[5])
ID.append(pair[6])
kb_arr.append(pair[7])
if(type):
lang.index_words(pair[0])
lang.index_words(pair[1], trg=True)
dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=type,
collate_fn=collate_fn)
return data_loader
def prepare_data_seq(task,batch_size=100,shuffle=True):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if (int(task) != 6):
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
if int(task)!=6:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))
else:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))
pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)
pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)
pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)
max_r_test_OOV = 0
max_len_test_OOV = 0
if (int(task) != 6):
pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)
max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1
max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1
lang = Lang()
train = get_seq(pair_train,lang,batch_size,True,max_len)
dev = get_seq(pair_dev,lang,batch_size,False,max_len)
test = get_seq(pair_test,lang,batch_size,False,max_len)
if (int(task) != 6):
testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)
else:
testOOV = []
logging.info("Read %s sentence pairs train" % len(pair_train))
logging.info("Read %s sentence pairs dev" % len(pair_dev))
logging.info("Read %s sentence pairs test" % len(pair_test))
if (int(task) != 6):
logging.info("Read %s sentence pairs test" % len(pair_test_OOV))
logging.info("Max len Input %s " % max_len)
logging.info("Vocab_size %s " % lang.n_words)
logging.info("USE_CUDA={}".format(USE_CUDA))
return train, dev, test, testOOV, lang, max_len, max_r | 39.008772 | 141 | 0.55603 |
import logging
import torch
import torch.utils.data as data
from torch.autograd import Variable
from utils.config import *
from utils.until_temp import entityList
def hasNumbers(inputString):
return any(char.isdigit() for char in inputString)
MEM_TOKEN_SIZE = 3
class Lang:
def __init__(self):
self.word2index = {}
self.word2count = {}
self.index2word = {UNK_token: 'UNK', PAD_token: "PAD", EOS_token: "EOS", SOS_token: "SOS"}
self.n_words = 4
def index_words(self, story, trg=False):
if trg:
for word in story.split(' '):
self.index_word(word)
else:
for word_triple in story:
for word in word_triple:
self.index_word(word)
def index_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
class Dataset(data.Dataset):
def __init__(self, src_seq, trg_seq, index_seq, gate_seq,src_word2id, trg_word2id,max_len, conv_seq,ent,ID,kb_arr):
self.src_seqs = src_seq
self.trg_seqs = trg_seq
self.index_seqs = index_seq
self.gate_seq = gate_seq
self.num_total_seqs = len(self.src_seqs)
self.src_word2id = src_word2id
self.trg_word2id = trg_word2id
self.max_len = max_len
self.conv_seq = conv_seq
self.ent = ent
self.ID = ID
self.kb_arr = kb_arr
def __getitem__(self, index):
src_seq = self.src_seqs[index]
trg_seq = self.trg_seqs[index]
index_s = self.index_seqs[index]
gete_s = self.gate_seq[index]
src_seq = self.preprocess(src_seq, self.src_word2id, trg=False)
trg_seq = self.preprocess(trg_seq, self.trg_word2id)
index_s = self.preprocess_inde(index_s,src_seq)
gete_s = self.preprocess_gate(gete_s)
conv_seq = self.conv_seq[index]
conv_seq = self.preprocess(conv_seq, self.src_word2id, trg=False)
ID = self.ID[index]
kb_arr = self.kb_arr[index]
return src_seq, trg_seq, index_s, gete_s,self.max_len,self.src_seqs[index],self.trg_seqs[index], conv_seq,self.ent[index], ID, kb_arr
def __len__(self):
return self.num_total_seqs
def preprocess(self, sequence, word2id, trg=True):
if trg:
story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')]+ [EOS_token]
else:
story = []
for i, word_triple in enumerate(sequence):
story.append([])
for ii, word in enumerate(word_triple):
temp = word2id[word] if word in word2id else UNK_token
story[i].append(temp)
try:
story = torch.Tensor(story)
except:
print(sequence)
print(story)
return story
def preprocess_inde(self, sequence, src_seq):
sequence = sequence + [len(src_seq)-1]
sequence = torch.Tensor(sequence)
return sequence
def preprocess_gate(self, sequence):
sequence = sequence + [0]
sequence = torch.Tensor(sequence)
return sequence
def collate_fn(data):
def merge(sequences,max_len):
lengths = [len(seq) for seq in sequences]
if (max_len):
padded_seqs = torch.ones(len(sequences), max(lengths), MEM_TOKEN_SIZE).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i,:end,:] = seq[:end]
else:
padded_seqs = torch.ones(len(sequences), max(lengths)).long()
for i, seq in enumerate(sequences):
end = lengths[i]
padded_seqs[i, :end] = seq[:end]
return padded_seqs, lengths
data.sort(key=lambda x: len(x[0]), reverse=True)
src_seqs, trg_seqs, ind_seqs, gete_s, max_len, src_plain,trg_plain, conv_seq, ent, ID, kb_arr = zip(*data)
src_seqs, src_lengths = merge(src_seqs,max_len)
trg_seqs, trg_lengths = merge(trg_seqs,None)
ind_seqs, _ = merge(ind_seqs,None)
gete_s, _ = merge(gete_s,None)
conv_seqs, conv_lengths = merge(conv_seq, max_len)
src_seqs = Variable(src_seqs).transpose(0,1)
trg_seqs = Variable(trg_seqs).transpose(0,1)
ind_seqs = Variable(ind_seqs).transpose(0,1)
gete_s = Variable(gete_s).transpose(0,1)
conv_seqs = Variable(conv_seqs).transpose(0,1)
if USE_CUDA:
src_seqs = src_seqs.cuda()
trg_seqs = trg_seqs.cuda()
ind_seqs = ind_seqs.cuda()
gete_s = gete_s.cuda()
conv_seqs = conv_seqs.cuda()
return src_seqs, src_lengths, trg_seqs, trg_lengths, ind_seqs, gete_s, src_plain, trg_plain, conv_seqs, conv_lengths, ent, ID, kb_arr
def read_langs(file_name, entity, max_line = None):
logging.info(("Reading lines from {}".format(file_name)))
data=[]
contex_arr = []
conversation_arr = []
kb_arr = []
u=None
r=None
user_counter = 0
system_counter = 0
system_res_counter = 0
KB_counter = 0
dialog_counter = 0
with open(file_name) as fin:
cnt_ptr = 0
cnt_voc = 0
max_r_len = 0
cnt_lin = 1
time_counter = 1
for line in fin:
line=line.strip()
if line:
nid, line = line.split(' ', 1)
if '\t' in line:
u, r = line.split('\t')
if u!='<SILENCE>': user_counter += 1
system_counter += 1
gen_u = generate_memory(u, "$u", str(time_counter))
contex_arr += gen_u
conversation_arr += gen_u
r_index = []
gate = []
for key in r.split(' '):
if ENTPTR:
if (key in entity):
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
cnt_voc +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
else:
index = [loc for loc, val in enumerate(contex_arr) if (val[0] == key)]
if (index):
index = max(index)
gate.append(1)
cnt_ptr +=1
else:
index = len(contex_arr)
gate.append(0)
cnt_voc +=1
r_index.append(index)
system_res_counter += 1
if len(r_index) > max_r_len:
max_r_len = len(r_index)
contex_arr_temp = contex_arr + [['$$$$']*MEM_TOKEN_SIZE]
ent = []
for key in r.split(' '):
if(key in entity):
ent.append(key)
data.append([contex_arr_temp,r,r_index,gate,list(conversation_arr),ent,dialog_counter, kb_arr])
gen_r = generate_memory(r, "$s", str(time_counter))
contex_arr += gen_r
conversation_arr += gen_r
time_counter += 1
else:
KB_counter += 1
r=line
if USEKB:
temp = generate_memory(r, "", "")
contex_arr += temp
kb_arr += temp
else:
cnt_lin+=1
if(max_line and cnt_lin>=max_line):
break
contex_arr=[]
conversation_arr = []
kb_arr = []
time_counter = 1
dialog_counter += 1
max_len = max([len(d[0]) for d in data])
logging.info("Pointer percentace= {} ".format(cnt_ptr/(cnt_ptr+cnt_voc)))
logging.info("Max responce Len: {}".format(max_r_len))
logging.info("Max Input Len: {}".format(max_len))
logging.info("Avg. User Utterances: {}".format(user_counter*1.0/dialog_counter))
logging.info("Avg. Bot Utterances: {}".format(system_counter*1.0/dialog_counter))
logging.info("Avg. KB results: {}".format(KB_counter*1.0/dialog_counter))
logging.info("Avg. responce Len: {}".format(system_res_counter*1.0/system_counter))
print('Sample: ',data[1][0],data[1][1],data[1][2],data[1][3])
return data, max_len, max_r_len
def generate_memory(sent, speaker, time):
sent_new = []
sent_token = sent.split(' ')
if speaker=="$u" or speaker=="$s":
for word in sent_token:
temp = [word, speaker, 't'+str(time)] + ["PAD"]*(MEM_TOKEN_SIZE-3)
sent_new.append(temp)
else:
if sent_token[1]=="R_rating":
sent_token = sent_token + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
else:
sent_token = sent_token[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
sent_new.append(sent_token)
return sent_new
def get_seq(pairs,lang,batch_size,type,max_len):
x_seq = []
y_seq = []
ptr_seq = []
gate_seq = []
conv_seq = []
ent = []
ID = []
kb_arr = []
for pair in pairs:
x_seq.append(pair[0])
y_seq.append(pair[1])
ptr_seq.append(pair[2])
gate_seq.append(pair[3])
conv_seq.append(pair[4])
ent.append(pair[5])
ID.append(pair[6])
kb_arr.append(pair[7])
if(type):
lang.index_words(pair[0])
lang.index_words(pair[1], trg=True)
dataset = Dataset(x_seq, y_seq,ptr_seq,gate_seq,lang.word2index, lang.word2index,max_len, conv_seq,ent,ID,kb_arr)
data_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=type,
collate_fn=collate_fn)
return data_loader
def prepare_data_seq(task,batch_size=100,shuffle=True):
file_train = 'data/dialog-bAbI-tasks/dialog-babi-task{}trn.txt'.format(task)
file_dev = 'data/dialog-bAbI-tasks/dialog-babi-task{}dev.txt'.format(task)
file_test = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst.txt'.format(task)
if (int(task) != 6):
file_test_OOV = 'data/dialog-bAbI-tasks/dialog-babi-task{}tst-OOV.txt'.format(task)
if int(task)!=6:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(task))
else:
ent = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(task))
pair_train,max_len_train, max_r_train = read_langs(file_train, ent, max_line=None)
pair_dev,max_len_dev, max_r_dev = read_langs(file_dev, ent, max_line=None)
pair_test,max_len_test, max_r_test = read_langs(file_test, ent, max_line=None)
max_r_test_OOV = 0
max_len_test_OOV = 0
if (int(task) != 6):
pair_test_OOV,max_len_test_OOV, max_r_test_OOV = read_langs(file_test_OOV, ent, max_line=None)
max_len = max(max_len_train,max_len_dev,max_len_test,max_len_test_OOV) + 1
max_r = max(max_r_train,max_r_dev,max_r_test,max_r_test_OOV) +1
lang = Lang()
train = get_seq(pair_train,lang,batch_size,True,max_len)
dev = get_seq(pair_dev,lang,batch_size,False,max_len)
test = get_seq(pair_test,lang,batch_size,False,max_len)
if (int(task) != 6):
testOOV = get_seq(pair_test_OOV,lang,batch_size,False,max_len)
else:
testOOV = []
logging.info("Read %s sentence pairs train" % len(pair_train))
logging.info("Read %s sentence pairs dev" % len(pair_dev))
logging.info("Read %s sentence pairs test" % len(pair_test))
if (int(task) != 6):
logging.info("Read %s sentence pairs test" % len(pair_test_OOV))
logging.info("Max len Input %s " % max_len)
logging.info("Vocab_size %s " % lang.n_words)
logging.info("USE_CUDA={}".format(USE_CUDA))
return train, dev, test, testOOV, lang, max_len, max_r | true | true |
f7339ef7fada422038c26aff3a05596353cb5673 | 643 | py | Python | main.py | fossabot/ar4maps | 053a0bc623c40a8b3aa1e3e7ce57b10f00ae2849 | [
"MIT"
] | 6 | 2020-05-26T10:13:45.000Z | 2021-12-04T08:46:59.000Z | main.py | fossabot/ar4maps | 053a0bc623c40a8b3aa1e3e7ce57b10f00ae2849 | [
"MIT"
] | 1 | 2020-05-25T15:03:10.000Z | 2020-05-25T15:03:10.000Z | main.py | fossabot/ar4maps | 053a0bc623c40a8b3aa1e3e7ce57b10f00ae2849 | [
"MIT"
] | 2 | 2020-05-25T14:55:40.000Z | 2020-12-06T03:52:27.000Z | # *****************************************************************************
# * Author: Miguel Magalhaes
# * Email: miguel@magalhaes.pro
# *****************************************************************************
# * Main
# *****************************************************************************
import sys
import yaml
from PyQt5.QtWidgets import QApplication
from interface import Interface
if __name__ == "__main__":
app = QApplication(sys.argv)
with open(sys.argv[1] + 'config.yml') as f:
config = yaml.safe_load(f)
win = Interface(sys.argv[1], config)
win.show()
sys.exit(app.exec_())
| 30.619048 | 79 | 0.415241 |
import sys
import yaml
from PyQt5.QtWidgets import QApplication
from interface import Interface
if __name__ == "__main__":
app = QApplication(sys.argv)
with open(sys.argv[1] + 'config.yml') as f:
config = yaml.safe_load(f)
win = Interface(sys.argv[1], config)
win.show()
sys.exit(app.exec_())
| true | true |
f7339f758dce9f4a54ec4e742425982bcaa3bec7 | 1,770 | py | Python | chords/admin.py | Ilias95/guitarchords | 4477ad1110718ad64d2180b6dc9a5f03eb49ebde | [
"MIT"
] | 4 | 2015-08-28T23:35:54.000Z | 2016-12-30T15:26:50.000Z | chords/admin.py | Ilias95/guitarchords | 4477ad1110718ad64d2180b6dc9a5f03eb49ebde | [
"MIT"
] | null | null | null | chords/admin.py | Ilias95/guitarchords | 4477ad1110718ad64d2180b6dc9a5f03eb49ebde | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Artist, Song
admin.AdminSite.site_title = 'Chords administration'
admin.AdminSite.site_header = 'Chords Administration'
class ArtistAdmin(admin.ModelAdmin):
exclude = ['slug']
actions = ['delete_selected']
search_fields = ['name']
def delete_selected(self, request, queryset):
for artist in queryset:
artist.delete()
delete_selected.short_description = 'Delete selected artists (custom)'
class SongAdmin(admin.ModelAdmin):
fieldsets = [
('General', {'fields': ['title', 'artist', 'genre']}),
('User information', {'fields': ['sender'], 'classes': ['collapse']}),
('Content', {'fields': ['content', 'tabs', 'video']}),
('Published', {'fields': ['published']}),
]
list_display = ['full_title', 'reg_date', 'pub_date', 'published']
list_filter = ['pub_date', 'reg_date', 'genre', 'tabs']
search_fields = ['title', 'artist__name']
actions = ['delete_selected', 'publish_songs', 'unpublish_songs']
def publish_songs(self, request, queryset):
for song in queryset:
if not song.published:
song.publish()
publish_songs.short_description = 'Publish all selected songs'
def unpublish_songs(self, request, queryset):
for song in queryset:
if song.published:
song.unpublish()
unpublish_songs.short_description = 'Unpublish all selected songs'
def delete_selected(self, request, queryset):
for artist in queryset:
artist.delete()
delete_selected.short_description = 'Delete selected songs (custom)'
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Song, SongAdmin)
| 31.607143 | 78 | 0.642938 | from django.contrib import admin
from .models import Artist, Song
admin.AdminSite.site_title = 'Chords administration'
admin.AdminSite.site_header = 'Chords Administration'
class ArtistAdmin(admin.ModelAdmin):
exclude = ['slug']
actions = ['delete_selected']
search_fields = ['name']
def delete_selected(self, request, queryset):
for artist in queryset:
artist.delete()
delete_selected.short_description = 'Delete selected artists (custom)'
class SongAdmin(admin.ModelAdmin):
fieldsets = [
('General', {'fields': ['title', 'artist', 'genre']}),
('User information', {'fields': ['sender'], 'classes': ['collapse']}),
('Content', {'fields': ['content', 'tabs', 'video']}),
('Published', {'fields': ['published']}),
]
list_display = ['full_title', 'reg_date', 'pub_date', 'published']
list_filter = ['pub_date', 'reg_date', 'genre', 'tabs']
search_fields = ['title', 'artist__name']
actions = ['delete_selected', 'publish_songs', 'unpublish_songs']
def publish_songs(self, request, queryset):
for song in queryset:
if not song.published:
song.publish()
publish_songs.short_description = 'Publish all selected songs'
def unpublish_songs(self, request, queryset):
for song in queryset:
if song.published:
song.unpublish()
unpublish_songs.short_description = 'Unpublish all selected songs'
def delete_selected(self, request, queryset):
for artist in queryset:
artist.delete()
delete_selected.short_description = 'Delete selected songs (custom)'
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Song, SongAdmin)
| true | true |
f7339ffceb1271b59fe05fe8af5b4d70a0b4e922 | 340 | py | Python | Jumping around and changing speed.py | Toulik1729231/Python3.7 | 56acd1af1b7c7e664c7bd8bd6eec0740871b6815 | [
"MIT"
] | null | null | null | Jumping around and changing speed.py | Toulik1729231/Python3.7 | 56acd1af1b7c7e664c7bd8bd6eec0740871b6815 | [
"MIT"
] | null | null | null | Jumping around and changing speed.py | Toulik1729231/Python3.7 | 56acd1af1b7c7e664c7bd8bd6eec0740871b6815 | [
"MIT"
] | null | null | null | import turtle
ninja = turtle.Turtle()
ninja.speed(10)
for i in range(180):
ninja.forward(100)
ninja.right(30)
ninja.forward(20)
ninja.left(60)
ninja.forward(50)
ninja.right(30)
ninja.penup()
ninja.setposition(0, 0)
ninja.pendown()
ninja.right(2)
turtle.done()
| 15.454545 | 28 | 0.570588 | import turtle
ninja = turtle.Turtle()
ninja.speed(10)
for i in range(180):
ninja.forward(100)
ninja.right(30)
ninja.forward(20)
ninja.left(60)
ninja.forward(50)
ninja.right(30)
ninja.penup()
ninja.setposition(0, 0)
ninja.pendown()
ninja.right(2)
turtle.done()
| true | true |
f733a1fa06978ea0db0e27d7877804c982cfb8be | 211 | py | Python | tests/conftest.py | dustye/policyguru | 16da990ff600468077660acf10a9db6682454df1 | [
"MIT"
] | 8 | 2021-01-25T03:27:44.000Z | 2022-01-18T08:07:43.000Z | tests/conftest.py | dustye/policyguru | 16da990ff600468077660acf10a9db6682454df1 | [
"MIT"
] | 2 | 2021-04-24T22:49:20.000Z | 2021-06-10T16:25:37.000Z | tests/conftest.py | dustye/policyguru | 16da990ff600468077660acf10a9db6682454df1 | [
"MIT"
] | 4 | 2021-04-24T23:06:56.000Z | 2021-11-18T22:50:26.000Z | import pytest
from starlette.testclient import TestClient
from policyguru.main import app
@pytest.fixture(scope="module")
def test_app():
client = TestClient(app)
yield client # testing happens here
| 19.181818 | 43 | 0.763033 | import pytest
from starlette.testclient import TestClient
from policyguru.main import app
@pytest.fixture(scope="module")
def test_app():
client = TestClient(app)
yield client
| true | true |
f733a244a6d71168c73c92692652d8e6b6eb0e22 | 6,761 | py | Python | Mini-DeepText-2.0/train.py | Ethan-Yang0101/Mini-DeepText-Project | 6ed70fae7d00610b942fb9b2526d11ebfd1b48f7 | [
"MIT"
] | null | null | null | Mini-DeepText-2.0/train.py | Ethan-Yang0101/Mini-DeepText-Project | 6ed70fae7d00610b942fb9b2526d11ebfd1b48f7 | [
"MIT"
] | null | null | null | Mini-DeepText-2.0/train.py | Ethan-Yang0101/Mini-DeepText-Project | 6ed70fae7d00610b942fb9b2526d11ebfd1b48f7 | [
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from TextDataset import TextDataset
from Model.BasicModel.TextCLRModel import TextCLRModel
from Model.BasicModel.TextSLBModel import TextSLBModel
from Model.BasicModel.TextNMTModel import TextNMTModel
from Model.BasicModel.TextDSMModel import TextDSMModel
from Model.Transformer.Transformer import Transformer
from Vectorizer.CLRVectorizer import CLRVectorizer
from Vectorizer.SLBVectorizer import SLBVectorizer
from Vectorizer.NMTVectorizer import NMTVectorizer
from Vectorizer.DSMVectorizer import DSMVectorizer
from Utils.Data import read_json_dataset
from ModelTrainer import ModelTrainer
from Utils.Config import Config
import json
import sys
import os
def get_data_loaders(args, dataset):
'''通过数据集创建用于训练,验证和测试的数据批生成器'''
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
if os.path.exists(args.vectorizer_file):
parameters = {'dataset': dataset,
'split_ratio': args.split_ratio,
'max_seq_length': args.max_seq_length,
'task': args.task,
'vectorizer_file': args.vectorizer_file}
dataset = TextDataset.dataset_load_vectorizer(**parameters)
else:
parameters = {'dataset': dataset,
'split_ratio': args.split_ratio,
'max_seq_length': args.max_seq_length,
'task': args.task,
'cutoff': args.cutoff}
dataset = TextDataset.dataset_make_vectorizer(**parameters)
dataset.save_vectorizer(args.vectorizer_file)
dataset.set_split('train')
train_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
dataset.set_split('val')
val_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
dataset.set_split('test')
test_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
data_loaders = (train_data_loader, val_data_loader, test_data_loader)
return data_loaders
def get_task_model(args, vectorizer):
'''根据任务类型获取用于训练的模型类型'''
model = None
if args.task == 'classification':
if args.model_name == 'TextCLRModel':
model = TextCLRModel(
num_embeddings=len(vectorizer.source_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
num_classes=len(vectorizer.label_vocab),
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'labeling':
if args.model_name == 'TextSLBModel':
model = TextSLBModel(
num_embeddings=len(vectorizer.source_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'matching':
if args.model_name == 'TextDSMModel':
model = TextDSMModel(
num_embeddings1=len(vectorizer.source_vocab),
num_embeddings2=len(vectorizer.target_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'translation':
if args.model_name == 'Transformer':
model = Transformer(
source_vocab_size=len(vectorizer.source_vocab),
target_vocab_size=len(vectorizer.target_vocab),
source_embed_dim=args.source_embed_dim,
target_embed_dim=args.target_embed_dim,
encoder_n_heads=args.encoder_n_heads,
decoder_n_heads=args.decoder_n_heads,
encoder_hid_dim=args.encoder_hid_dim,
decoder_hid_dim=args.decoder_hid_dim,
encoder_n_layers=args.encoder_n_layers,
decoder_n_layers=args.decoder_n_layers,
encoder_max_seq_len=args.max_seq_length,
decoder_max_seq_len=args.max_seq_length
)
if args.model_name == 'TextNMTModel':
model = TextNMTModel(
source_num_embeddings=len(vectorizer.source_vocab),
source_embedding_size=args.source_embedding_size,
target_num_embeddings=len(vectorizer.target_vocab),
target_embedding_size=args.target_embedding_size,
encoding_size=args.encoding_size)
return model
def get_optimizer(args, model):
'''获取想要使用的优化器'''
if args.optimizer == 'adam':
return optim.Adam(model.parameters(), lr=args.learning_rate)
def get_loss_func(args):
'''根据任务类型获取损失函数'''
if args.task == 'classification':
return nn.CrossEntropyLoss()
if args.task == 'matching':
return nn.CrossEntropyLoss()
if args.task == 'labeling':
return sequence_loss
if args.task == 'translation':
return sequence_loss
def sequence_loss(pred, target, mask_index):
'''用于计算序列模型的损失函数'''
pred = pred.contiguous().view(-1, pred.size(2))
target = target.contiguous().view(-1)
return F.cross_entropy(pred, target, ignore_index=mask_index)
def get_vectorizer(args):
'''根据任务获取矢量化器'''
with open(args.vectorizer_file, "r") as fp:
if args.task == 'classification':
return CLRVectorizer.from_serializable(json.load(fp))
if args.task == 'matching':
return DSMVectorizer.from_serializable(json.load(fp))
if args.task == 'labeling':
return GENVectorizer.from_serializable(json.load(fp))
if args.task == 'translation':
return NMTVectorizer.from_serializable(json.load(fp))
if __name__ == '__main__':
# 获取配置文件信息
config_filename = sys.argv[1]
config = Config.from_config_json(config_filename)
args = config.args
# 获取数据集
dataset = read_json_dataset(args.data_filepath, args.max_seq_length)
# 获取数据批生成器
data_loaders = get_data_loaders(args, dataset)
# 获取模型
vectorizer = get_vectorizer(args)
model = get_task_model(args, vectorizer)
# 获取优化器
optimizer = get_optimizer(args, model)
# 获取损失函数
loss_func = get_loss_func(args)
# 获取训练器
model_trainer = ModelTrainer(
args, data_loaders, model, optimizer, loss_func)
# 训练模型
model_trainer.train_val_test_model()
| 39.538012 | 79 | 0.659222 |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
from TextDataset import TextDataset
from Model.BasicModel.TextCLRModel import TextCLRModel
from Model.BasicModel.TextSLBModel import TextSLBModel
from Model.BasicModel.TextNMTModel import TextNMTModel
from Model.BasicModel.TextDSMModel import TextDSMModel
from Model.Transformer.Transformer import Transformer
from Vectorizer.CLRVectorizer import CLRVectorizer
from Vectorizer.SLBVectorizer import SLBVectorizer
from Vectorizer.NMTVectorizer import NMTVectorizer
from Vectorizer.DSMVectorizer import DSMVectorizer
from Utils.Data import read_json_dataset
from ModelTrainer import ModelTrainer
from Utils.Config import Config
import json
import sys
import os
def get_data_loaders(args, dataset):
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
if os.path.exists(args.vectorizer_file):
parameters = {'dataset': dataset,
'split_ratio': args.split_ratio,
'max_seq_length': args.max_seq_length,
'task': args.task,
'vectorizer_file': args.vectorizer_file}
dataset = TextDataset.dataset_load_vectorizer(**parameters)
else:
parameters = {'dataset': dataset,
'split_ratio': args.split_ratio,
'max_seq_length': args.max_seq_length,
'task': args.task,
'cutoff': args.cutoff}
dataset = TextDataset.dataset_make_vectorizer(**parameters)
dataset.save_vectorizer(args.vectorizer_file)
dataset.set_split('train')
train_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
dataset.set_split('val')
val_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
dataset.set_split('test')
test_data_loader = DataLoader(dataset=dataset, batch_size=args.batch_size,
shuffle=True, drop_last=True)
data_loaders = (train_data_loader, val_data_loader, test_data_loader)
return data_loaders
def get_task_model(args, vectorizer):
model = None
if args.task == 'classification':
if args.model_name == 'TextCLRModel':
model = TextCLRModel(
num_embeddings=len(vectorizer.source_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
num_classes=len(vectorizer.label_vocab),
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'labeling':
if args.model_name == 'TextSLBModel':
model = TextSLBModel(
num_embeddings=len(vectorizer.source_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'matching':
if args.model_name == 'TextDSMModel':
model = TextDSMModel(
num_embeddings1=len(vectorizer.source_vocab),
num_embeddings2=len(vectorizer.target_vocab),
embedding_dim=args.embedding_size,
rnn_hidden_size=args.rnn_hidden_size,
padding_idx=vectorizer.source_vocab.mask_index,
batch_first=True)
if args.task == 'translation':
if args.model_name == 'Transformer':
model = Transformer(
source_vocab_size=len(vectorizer.source_vocab),
target_vocab_size=len(vectorizer.target_vocab),
source_embed_dim=args.source_embed_dim,
target_embed_dim=args.target_embed_dim,
encoder_n_heads=args.encoder_n_heads,
decoder_n_heads=args.decoder_n_heads,
encoder_hid_dim=args.encoder_hid_dim,
decoder_hid_dim=args.decoder_hid_dim,
encoder_n_layers=args.encoder_n_layers,
decoder_n_layers=args.decoder_n_layers,
encoder_max_seq_len=args.max_seq_length,
decoder_max_seq_len=args.max_seq_length
)
if args.model_name == 'TextNMTModel':
model = TextNMTModel(
source_num_embeddings=len(vectorizer.source_vocab),
source_embedding_size=args.source_embedding_size,
target_num_embeddings=len(vectorizer.target_vocab),
target_embedding_size=args.target_embedding_size,
encoding_size=args.encoding_size)
return model
def get_optimizer(args, model):
if args.optimizer == 'adam':
return optim.Adam(model.parameters(), lr=args.learning_rate)
def get_loss_func(args):
if args.task == 'classification':
return nn.CrossEntropyLoss()
if args.task == 'matching':
return nn.CrossEntropyLoss()
if args.task == 'labeling':
return sequence_loss
if args.task == 'translation':
return sequence_loss
def sequence_loss(pred, target, mask_index):
pred = pred.contiguous().view(-1, pred.size(2))
target = target.contiguous().view(-1)
return F.cross_entropy(pred, target, ignore_index=mask_index)
def get_vectorizer(args):
with open(args.vectorizer_file, "r") as fp:
if args.task == 'classification':
return CLRVectorizer.from_serializable(json.load(fp))
if args.task == 'matching':
return DSMVectorizer.from_serializable(json.load(fp))
if args.task == 'labeling':
return GENVectorizer.from_serializable(json.load(fp))
if args.task == 'translation':
return NMTVectorizer.from_serializable(json.load(fp))
if __name__ == '__main__':
config_filename = sys.argv[1]
config = Config.from_config_json(config_filename)
args = config.args
dataset = read_json_dataset(args.data_filepath, args.max_seq_length)
data_loaders = get_data_loaders(args, dataset)
vectorizer = get_vectorizer(args)
model = get_task_model(args, vectorizer)
optimizer = get_optimizer(args, model)
loss_func = get_loss_func(args)
model_trainer = ModelTrainer(
args, data_loaders, model, optimizer, loss_func)
model_trainer.train_val_test_model()
| true | true |
f733a2810ed0c62aaf5db77c8205e09e414d3286 | 61,223 | py | Python | scripts/automation/trex_control_plane/interactive/trex/astf/trex_astf_client.py | kphaye/trex-core | 4b4d738182f7b3c44671c10ad6404ddd14e06498 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/astf/trex_astf_client.py | kphaye/trex-core | 4b4d738182f7b3c44671c10ad6404ddd14e06498 | [
"Apache-2.0"
] | null | null | null | scripts/automation/trex_control_plane/interactive/trex/astf/trex_astf_client.py | kphaye/trex-core | 4b4d738182f7b3c44671c10ad6404ddd14e06498 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import hashlib
import sys
import time
import os
import shlex
from ..utils.common import get_current_user, user_input, PassiveTimer
from ..utils import parsing_opts, text_tables
from ..common.trex_api_annotators import client_api, console_api
from ..common.trex_client import TRexClient, NO_TCP_UDP_MASK
from ..common.trex_events import Event
from ..common.trex_exceptions import TRexError, TRexTimeoutError
from ..common.trex_types import *
from ..common.trex_types import DEFAULT_PROFILE_ID, ALL_PROFILE_ID
from .trex_astf_port import ASTFPort
from .trex_astf_profile import ASTFProfile
from .topo import ASTFTopologyManager
from .stats.traffic import CAstfTrafficStats
from .stats.latency import CAstfLatencyStats
from ..utils.common import is_valid_ipv4, is_valid_ipv6
from ..utils.text_opts import format_text
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
# Ordered names of the ASTF state machine. The position of each name in this
# list is its numeric state id; ASTFClient.__init__ exposes every name as an
# instance attribute holding that index, and port_states maps them onto the
# corresponding ASTFPort values.
astf_states = [
    'STATE_IDLE',
    'STATE_ASTF_LOADED',
    'STATE_ASTF_PARSE',
    'STATE_ASTF_BUILD',
    'STATE_TX',
    'STATE_ASTF_CLEANUP',
    'STATE_ASTF_DELETE']
class TunnelType:
    """Numeric identifiers for tunnel encapsulation types."""
    NONE = 0  # no tunneling
    GTP = 1   # GTP tunneling (presumably GTP-U; confirm against server-side usage)
class ASTFClient(TRexClient):
    # ASTFPort state values listed in the same order as ``astf_states``;
    # ``getattr`` falls back to 0 for any name ASTFPort does not define.
    port_states = [getattr(ASTFPort, state, 0) for state in astf_states]
def __init__(self,
username = get_current_user(),
server = "localhost",
sync_port = 4501,
async_port = 4500,
verbose_level = "error",
logger = None,
sync_timeout = None,
async_timeout = None):
"""
TRex advance stateful client
:parameters:
username : string
the user name, for example imarom
server : string
the server name or ip
sync_port : int
the RPC port
async_port : int
the ASYNC port (subscriber port)
verbose_level: str
one of "none", "critical", "error", "info", "debug"
logger: instance of AbstractLogger
if None, will use ScreenLogger
sync_timeout: int
time in sec for timeout for RPC commands. for local lab keep it as default (3 sec)
higher number would be more resilient for Firewalls but slower to identify real server crash
async_timeout: int
time in sec for timeout for async notification. for local lab keep it as default (3 sec)
higher number would be more resilient for Firewalls but slower to identify real server crash
"""
api_ver = {'name': 'ASTF', 'major': 2, 'minor': 0}
TRexClient.__init__(self,
api_ver,
username,
server,
sync_port,
async_port,
verbose_level,
logger,
sync_timeout,
async_timeout)
self.handler = ''
self.traffic_stats = CAstfTrafficStats(self.conn.rpc)
self.latency_stats = CAstfLatencyStats(self.conn.rpc)
self.topo_mngr = ASTFTopologyManager(self)
self.sync_waiting = False
self.last_error = ''
self.last_profile_error = {}
self.epoch = None
self.state = None
for index, state in enumerate(astf_states):
setattr(self, state, index)
self.transient_states = [
self.STATE_ASTF_PARSE,
self.STATE_ASTF_BUILD,
self.STATE_ASTF_CLEANUP,
self.STATE_ASTF_DELETE]
self.astf_profile_state = {'_': 0}
def get_mode(self):
return "ASTF"
############################ called #############################
############################ by base #############################
############################ TRex Client #############################
    def _on_connect(self):
        # Called by the base client right after connecting: reset local
        # error/wait flags, then resync state and topology with the server.
        self.sync_waiting = False
        self.last_error = ''
        self.sync()
        self.topo_mngr.sync_with_server()
        return RC_OK()

    def _on_connect_create_ports(self, system_info):
        """
        called when connecting to the server
        triggered by the common client object
        """

        # create ports
        port_map = {}
        for port_info in system_info['ports']:
            port_id = port_info['index']
            port_map[port_id] = ASTFPort(self.ctx, port_id, self.conn.rpc, port_info)
        return self._assign_ports(port_map)

    def _on_connect_clear_stats(self):
        # Reset local stat models, then ask server to clear port counters
        # (xstats/traffic excluded here - they are cleared elsewhere).
        self.traffic_stats.reset()
        self.latency_stats.reset()
        with self.ctx.logger.suppress(verbose = "warning"):
            self.clear_stats(ports = self.get_all_ports(), clear_xstats = False, clear_traffic = False)
        return RC_OK()
    def _on_astf_state_chg(self, ctx_state, error, epoch):
        # Async event: the whole-context state changed on the server.
        # Returns an Event for the event log, or None for stale events.
        if ctx_state < 0 or ctx_state >= len(astf_states):
            raise TRexError('Unhandled ASTF state: %s' % ctx_state)
        # drop events that predate our current epoch (stale SUB/PUB traffic)
        if epoch is None or self.epoch is None:
            return
        self.last_error = error
        # blocking commands surface the error themselves (sync_waiting)
        if error and not self.sync_waiting:
            self.ctx.logger.error('Last command failed: %s' % error)
        self.state = ctx_state
        port_state = self.apply_port_states()
        port_state_name = ASTFPort.STATES_MAP[port_state].capitalize()
        if error:
            return Event('server', 'error', 'Moved to state: %s after error: %s' % (port_state_name, error))
        else:
            return Event('server', 'info', 'Moved to state: %s' % port_state_name)

    def _on_astf_profile_state_chg(self, profile_id, ctx_state, error, epoch):
        # Async event: a single profile changed state on the server.
        if ctx_state < 0 or ctx_state >= len(astf_states):
            raise TRexError('Unhandled ASTF state: %s' % ctx_state)
        # drop stale events from a previous epoch
        if epoch is None or self.epoch is None:
            return
        if error:
            self.last_profile_error[profile_id] = error
            if not self.sync_waiting:
                self.ctx.logger.error('Last profile %s command failed: %s' % (profile_id, error))
        # update profile state
        self.astf_profile_state[profile_id] = ctx_state
        if error:
            return Event('server', 'error', 'Moved to profile %s state: %s after error: %s' % (profile_id, ctx_state, error))
        else:
            return Event('server', 'info', 'Moved to profile %s state: %s' % (profile_id, ctx_state))

    def _on_astf_profile_cleared(self, profile_id, error, epoch):
        # Async event: a profile was removed on the server; drop our local
        # bookkeeping for it.
        if epoch is None or self.epoch is None:
            return
        if error:
            self.last_profile_error[profile_id] = error
            if not self.sync_waiting:
                self.ctx.logger.error('Last profile %s command failed: %s' % (profile_id, error))
        # remove profile and template group name
        self.astf_profile_state.pop(profile_id, None)
        self.traffic_stats._clear_tg_name(profile_id)
        if error:
            return Event('server', 'error', 'Can\'t remove profile %s after error: %s' % (profile_id, error))
        else:
            return Event('server', 'info', 'Removed profile : %s' % profile_id)
############################ helper #############################
############################ funcs #############################
############################ #############################
# Check console API ports argument
def validate_profile_id_input(self, pid_input = DEFAULT_PROFILE_ID, start = False):
valid_pids = []
ok_states = [self.STATE_IDLE, self.STATE_ASTF_LOADED]
# check profile ID's type
if type(pid_input) is not list:
profile_list = pid_input.split()
else:
profile_list = pid_input
if ALL_PROFILE_ID in profile_list:
if start == True:
raise TRexError("Cannot have %s as a profile value for start command" % ALL_PROFILE_ID)
else:
self.sync()
# return profiles can be operational only for the requests.
# STATE_IDLE is operational for 'profile_clear.'
return [pid for pid, state in self.astf_profile_state.items()
if state is not self.STATE_ASTF_DELETE]
for profile_id in profile_list:
if profile_id not in list(self.astf_profile_state.keys()):
self.sync()
break
# Check if profile_id is a valid profile name
for profile_id in profile_list:
if profile_id not in list(self.astf_profile_state.keys()):
if start == True:
self.astf_profile_state[profile_id] = self.STATE_IDLE
else:
raise TRexError("ASTF profile_id %s does not exist." % profile_id)
if start == True:
if self.is_dynamic and self.astf_profile_state.get(profile_id) not in ok_states:
raise TRexError("%s state:Transmitting, should be one of following:Idle, Loaded profile" % profile_id)
if profile_id not in valid_pids:
valid_pids.append(profile_id)
return valid_pids
def apply_port_states(self):
port_state = self.port_states[self.state]
for port in self.ports.values():
port.state = port_state
return port_state
    def wait_for_steady(self, profile_id=None):
        # Busy-wait until the given profile (or the whole context, when
        # profile_id is None) leaves any transient state; resync with the
        # server every ~100ms in case an async event was lost.
        timer = PassiveTimer()
        while True:
            state = self._get_profile_state(profile_id) if profile_id else self.state
            if state not in self.transient_states:
                break
            if timer.has_elapsed(0.1):
                self.sync()
            else:
                time.sleep(0.001)

    def wait_for_profile_state(self, profile_id, wait_state, timeout = None):
        # Block until the profile reaches wait_state.
        # Raises TRexTimeoutError once timeout (seconds, None = forever) expires.
        timer = PassiveTimer(timeout)
        while self._get_profile_state(profile_id) != wait_state:
            if timer.has_elapsed(0.1):
                self.sync()
            else:
                time.sleep(0.001)
            if timer.has_expired():
                raise TRexTimeoutError(timeout)

    def inc_epoch(self):
        # Advance the server-side epoch (stale async events are then ignored)
        # and resync local state.
        rc = self._transmit('inc_epoch', {'handler': self.handler})
        if not rc:
            raise TRexError(rc.err())
        self.sync()

    def _set_profile_state(self, profile_id, state):
        # record a profile's state index locally
        self.astf_profile_state[profile_id] = state

    def _get_profile_state(self, profile_id):
        # Without dynamic-profile support the context state IS the profile state.
        return self.astf_profile_state.get(profile_id, self.STATE_IDLE) if self.is_dynamic else self.state
    def _transmit_async(self, rpc_func, ok_states, bad_states = None, ready_state = None, **k):
        # Send an RPC whose effect is reported asynchronously, then poll the
        # profile state until it reaches ok_states (-> RC_OK), hits bad_states
        # or a recorded profile error (-> RC_ERR).
        # ready_state: wait for this state before transmitting (e.g. LOADED
        # before 'start'); it is dropped once a transient transition is seen.
        profile_id = k['params']['profile_id']
        ok_states = listify(ok_states)
        if bad_states is not None:
            bad_states = listify(bad_states)
        self.wait_for_steady()
        if rpc_func == 'start' and self.state is not self.STATE_TX:
            # fresh epoch so stale events from a previous run are ignored
            self.inc_epoch()
        # suppress duplicate error logging while we wait (errors are returned)
        self.sync_waiting = True
        try:
            if ready_state:
                assert ready_state not in self.transient_states
                if self._get_profile_state(profile_id) != ready_state:
                    self.wait_for_profile_state(profile_id, ready_state)
            else:
                self.wait_for_steady(profile_id)
            rc = self._transmit(rpc_func, **k)
            if not rc:
                return rc
            timer = PassiveTimer()
            while True:
                state = self._get_profile_state(profile_id)
                if state in ok_states:
                    return RC_OK()
                # check transient state transition first to avoid wrong decision (e.g. 'start')
                if ready_state and state in self.transient_states:
                    ready_state = None
                if self.last_profile_error.get(profile_id) or (not ready_state and bad_states and state in bad_states):
                    error = self.last_profile_error.pop(profile_id, None)
                    general_error = 'Unknown error, state: {}, profile: {}'.format(state, profile_id)
                    return RC_ERR(error or general_error)
                if timer.has_elapsed(0.2):
                    self.sync() # in case state change lost in async(SUB/PUB) channel
                else:
                    time.sleep(0.001)
        finally:
            self.sync_waiting = False
    def check_states(self, ok_states):
        # Poll until the context state is one of ok_states; resync with the
        # server every 10th iteration (~1 sec), otherwise sleep.
        cnt = 0
        while True:
            if self.state in ok_states:
                break
            cnt = cnt + 1
            if cnt % 10 == 0:
                self.sync()
            else:
                time.sleep(0.1) # 100ms
        self.sync() # guarantee to update profile states

    def _is_service_req(self):
        ''' Return False as service mode check is not required in ASTF '''
        return False
############################ ASTF #############################
############################ API #############################
############################ #############################
    @client_api('command', True)
    def reset(self, restart = False):
        """
        Force acquire ports, stop the traffic, remove loaded traffic and clear stats

        :parameters:
            restart: bool
                Restart the NICs (link down / up)

        :raises:
            + :exc:`TRexError`

        """
        ports = self.get_all_ports()

        if restart:
            self.ctx.logger.pre_cmd("Hard resetting ports {0}:".format(ports))
        else:
            self.ctx.logger.pre_cmd("Resetting ports {0}:".format(ports))

        try:
            with self.ctx.logger.suppress():
                # force take the port and ignore any streams on it
                self.acquire(force = True)
                # stop all profiles, then wait until nothing is transmitting
                self.stop(False, pid_input=ALL_PROFILE_ID)
                self.check_states(ok_states=[self.STATE_ASTF_LOADED, self.STATE_IDLE])
                self.stop_latency()
                self.traffic_stats.reset()
                self.latency_stats.reset()
                # remove all loaded profiles and wait for a fully idle context
                self.clear_profile(False, pid_input=ALL_PROFILE_ID)
                self.check_states(ok_states=[self.STATE_IDLE])
                self.clear_stats(ports, pid_input = ALL_PROFILE_ID)
                # promiscuous off (where supported); link up only on restart
                self.set_port_attr(ports,
                                   promiscuous = False if self.any_port.is_prom_supported() else None,
                                   link_up = True if restart else None)
                self.remove_rx_queue(ports)
                self.remove_all_captures()
                self._for_each_port('stop_capture_port', ports)

            self.ctx.logger.post_cmd(RC_OK())

        except TRexError as e:
            self.ctx.logger.post_cmd(False)
            raise
    @client_api('command', True)
    def acquire(self, force = False):
        """
        Acquires ports for executing commands

        :parameters:
            force : bool
                Force acquire the ports.

        :raises:
            + :exc:`TRexError`

        """
        ports = self.get_all_ports()

        if force:
            self.ctx.logger.pre_cmd('Force acquiring ports %s:' % ports)
        else:
            self.ctx.logger.pre_cmd('Acquiring ports %s:' % ports)

        params = {'force': force,
                  'user': self.ctx.username,
                  'session_id': self.ctx.session_id}
        rc = self._transmit('acquire', params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError('Could not acquire context: %s' % rc.err())

        # context-wide handler plus a per-port handler for each port
        self.handler = rc.data()['handler']
        for port_id, port_rc in rc.data()['ports'].items():
            self.ports[int(port_id)]._set_handler(port_rc)

        self._post_acquire_common(ports)
    @client_api('command', True)
    def sync(self):
        # Pull the authoritative context state, per-profile states and epoch
        # from the server; returns the profile-state dict.
        self.epoch = None
        params = {'profile_id': "sync"}
        rc = self._transmit('sync', params)
        if not rc:
            raise TRexError(rc.err())

        self.state = rc.data()['state']
        self.apply_port_states()

        if self.is_dynamic:
            self.astf_profile_state = rc.data()['state_profile']
        else:
            # single-profile server: the default profile mirrors the context state
            self.astf_profile_state[DEFAULT_PROFILE_ID] = self.state

        self.epoch = rc.data()['epoch']
        return self.astf_profile_state
    @client_api('command', True)
    def release(self, force = False):
        """
        Release ports

        :parameters:
            none

        :raises:
            + :exc:`TRexError`

        """
        ports = self.get_acquired_ports()
        self.ctx.logger.pre_cmd("Releasing ports {0}:".format(ports))
        params = {'handler': self.handler}
        rc = self._transmit('release', params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError('Could not release context: %s' % rc.err())

        # drop the context handler and every per-port handler
        self.handler = ''
        for port_id in ports:
            self.ports[port_id]._clear_handler()
    def _upload_fragmented(self, rpc_cmd, upload_string, pid_input = DEFAULT_PROFILE_ID):
        # Upload a (possibly large) string to the server in fragments via
        # rpc_cmd. The first fragment is small and carries an md5 of the whole
        # payload so the server can skip the upload if it already holds it.
        index_start = 0
        fragment_length = 1000 # first fragment is small, we compare hash before sending the rest
        while len(upload_string) > index_start:
            index_end = index_start + fragment_length
            params = {
                'handler': self.handler,
                'profile_id' : pid_input,
                'fragment': upload_string[index_start:index_end],
                }
            if index_start == 0:
                params['frag_first'] = True
            if index_end >= len(upload_string):
                params['frag_last'] = True
            if params.get('frag_first') and not params.get('frag_last'):
                params['md5'] = hashlib.md5(upload_string.encode()).hexdigest()

            rc = self._transmit(rpc_cmd, params = params)
            if not rc:
                return rc
            if params.get('frag_first') and not params.get('frag_last'):
                # server already has an identical payload - nothing more to send
                if rc.data() and rc.data().get('matches_loaded'):
                    break
            index_start = index_end
            fragment_length = 500000 # rest of fragments are larger
        return RC_OK()
    @client_api('command', True)
    def set_service_mode (self, ports = None, enabled = True, filtered = False, mask = None):
        ''' based on :meth:`trex.astf.trex_astf_client.ASTFClient.set_service_mode_base` '''
        # call the base method
        self.set_service_mode_base(ports = ports, enabled = enabled, filtered = filtered, mask = mask)
        # in ASTF send to all ports with the handler of the ctx
        params = {"handler": self.handler,
                  "enabled": enabled,
                  "filtered": filtered}
        if filtered:
            params['mask'] = mask

        # transmit server once for all the ports
        rc = self._transmit('service', params)
        self.ctx.logger.post_cmd(rc)

        if not rc:
            raise TRexError(rc)
        else:
            # sending all ports in order to change their attributes
            self._for_each_port('set_service_mode', None, enabled, filtered, mask)
    @client_api('command', True)
    def load_profile(self, profile, tunables = {}, pid_input = DEFAULT_PROFILE_ID):
        """ Upload ASTF profile to server

            :parameters:
                profile: string or ASTFProfile
                    Path to profile filename or profile object

                tunables: dict
                    forward those key-value pairs to the profile file

                pid_input: string
                    Input profile ID

            :raises:
                + :exc:`TRexError`

        """
        if not isinstance(profile, ASTFProfile):
            try:
                profile = ASTFProfile.load(profile, **tunables)
            except Exception as e:
                # loading failed - forget the (never-loaded) profile entry
                self.astf_profile_state.pop(pid_input, None)
                raise TRexError('Could not load profile: %s' % e)

        #when ".. -t --help", is called then return
        if profile is None:
            return
        profile_json = profile.to_json_str(pretty = False, sort_keys = True)

        self.ctx.logger.pre_cmd('Loading traffic at acquired ports.')
        # fragmented upload; server may skip if it already holds this profile
        rc = self._upload_fragmented('profile_fragment', profile_json, pid_input = pid_input)
        if not rc:
            self.ctx.logger.post_cmd(False)
            raise TRexError('Could not load profile, error: %s' % rc.err())
        self.ctx.logger.post_cmd(True)
@client_api('command', False)
def get_traffic_distribution(self, start_ip, end_ip, dual_ip, seq_split):
''' Get distribution of IP range per TRex port per core
:parameters:
start_ip: IP string
Related to "ip_range" argument of ASTFIPGenDist
end_ip: IP string
Related to "ip_range" argument of ASTFIPGenDist
dual_ip: IP string
Related to "ip_offset" argument of ASTFIPGenGlobal
seq_split: bool
Related to "per_core_distribution" argument of ASTFIPGenDist, "seq" => seq_split=True
'''
if not is_valid_ipv4(start_ip):
raise TRexError("start_ip is not a valid IPv4 address: '%s'" % start_ip)
if not is_valid_ipv4(end_ip):
raise TRexError("end_ip is not a valid IPv4 address: '%s'" % end_ip)
if not is_valid_ipv4(dual_ip):
raise TRexError("dual_ip is not a valid IPv4 address: '%s'" % dual_ip)
params = {
'start_ip': start_ip,
'end_ip': end_ip,
'dual_ip': dual_ip,
'seq_split': seq_split,
}
rc = self._transmit('get_traffic_dist', params = params)
if not rc:
raise TRexError(rc.err())
res = {}
for port_id, port_data in rc.data().items():
core_dict = {}
for core_id, core_data in port_data.items():
core_dict[int(core_id)] = core_data
res[int(port_id)] = core_dict
return res
@client_api('command', True)
def clear_profile(self, block = True, pid_input = DEFAULT_PROFILE_ID):
"""
Clear loaded profile
:parameters:
pid_input: string
Input profile ID
:raises:
+ :exc:`TRexError`
"""
ok_states = [self.STATE_IDLE, self.STATE_ASTF_LOADED]
valid_pids = self.validate_profile_id_input(pid_input)
for profile_id in valid_pids:
profile_state = self.astf_profile_state.get(profile_id)
if profile_state in ok_states:
params = {
'handler': self.handler,
'profile_id': profile_id
}
self.ctx.logger.pre_cmd('Clearing loaded profile.')
if block:
rc = self._transmit_async('profile_clear', params = params, ok_states = self.STATE_IDLE)
else:
rc = self._transmit('profile_clear', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
else:
self.logger.info(format_text("Cannot remove a profile: %s is not state IDLE and state LOADED.\n" % profile_id, "bold", "magenta"))
@client_api('command', True)
def start(self, mult = 1, duration = -1, nc = False, block = True, latency_pps = 0, ipv6 = False, pid_input = DEFAULT_PROFILE_ID, client_mask = 0xffffffff):
"""
Start the traffic on loaded profile. Procedure is async.
:parameters:
mult: int
Multiply total CPS of profile by this value.
duration: float
Start new flows for this duration.
Negative value means infinite
nc: bool
Do not wait for flows to close at end of duration.
block: bool
Wait for traffic to be started (operation is async).
latency_pps: uint32_t
Rate of latency packets. Zero value means disable.
ipv6: bool
Convert traffic to IPv6.
client_mask: uint32_t
Bitmask of enabled client ports.
pid_input: string
Input profile ID
:raises:
+ :exc:`TRexError`
"""
params = {
'handler': self.handler,
'profile_id': pid_input,
'mult': mult,
'nc': nc,
'duration': duration,
'latency_pps': latency_pps,
'ipv6': ipv6,
'client_mask': client_mask,
}
self.ctx.logger.pre_cmd('Starting traffic.')
valid_pids = self.validate_profile_id_input(pid_input, start = True)
for profile_id in valid_pids:
if block:
rc = self._transmit_async('start', params = params, ok_states = self.STATE_TX, bad_states = self.STATE_ASTF_LOADED, ready_state = self.STATE_ASTF_LOADED)
else:
rc = self._transmit('start', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
    @client_api('command', True)
    def stop(self, block = True, pid_input = DEFAULT_PROFILE_ID, is_remove = False):
        """
        Stop the traffic.

        :parameters:
            block: bool
                Wait for traffic to be stopped (operation is async)
                Default is True

            pid_input: string
                Input profile ID

            is_remove: bool
                Remove the profile id
                Default is False

        :raises:
            + :exc:`TRexError`
        """
        valid_pids = self.validate_profile_id_input(pid_input)

        for profile_id in valid_pids:
            profile_state = self.astf_profile_state.get(profile_id)

            # 'stop' will be silently ignored in server-side PARSE/BUILD state.
            # So, TX state should be forced to avoid unexpected hanging situation.
            if profile_state in {self.STATE_ASTF_PARSE, self.STATE_ASTF_BUILD}:
                self.wait_for_profile_state(profile_id, self.STATE_TX)
                profile_state = self.astf_profile_state.get(profile_id)

            if profile_state is self.STATE_TX:
                params = {
                    'handler': self.handler,
                    'profile_id': profile_id
                }
                self.ctx.logger.pre_cmd('Stopping traffic.')
                # removal requires the stop to fully finish, hence block
                if block or is_remove:
                    rc = self._transmit_async('stop', params = params, ok_states = [self.STATE_IDLE, self.STATE_ASTF_LOADED])
                else:
                    rc = self._transmit('stop', params = params)
                self.ctx.logger.post_cmd(rc)
                if not rc:
                    raise TRexError(rc.err())
                profile_state = self.astf_profile_state.get(profile_id)

            if is_remove:
                # wait out cleanup, then drop the profile from the server
                if profile_state is self.STATE_ASTF_CLEANUP:
                    self.wait_for_profile_state(profile_id, self.STATE_ASTF_LOADED)
                self.clear_profile(block = block, pid_input = profile_id)
    @client_api('command', True)
    def update(self, mult, pid_input = DEFAULT_PROFILE_ID):
        """
        Update the rate of running traffic.

        :parameters:
            mult: int
                Multiply total CPS of profile by this value (not relative to current running rate)
                Default is 1

            pid_input: string
                Input profile ID

        :raises:
            + :exc:`TRexError`
        """
        valid_pids = self.validate_profile_id_input(pid_input)

        for profile_id in valid_pids:
            # note: params are rebuilt per profile so each RPC targets one id
            params = {
                'handler': self.handler,
                'profile_id': profile_id,
                'mult': mult,
            }
            self.ctx.logger.pre_cmd('Updating traffic.')
            rc = self._transmit('update', params = params)
            self.ctx.logger.post_cmd(rc)
            if not rc:
                raise TRexError(rc.err())
@client_api('command', True)
def get_profiles(self):
"""
Get profile list from Server.
"""
params = {
'handler': self.handler,
}
self.ctx.logger.pre_cmd('Getting profile list.')
rc = self._transmit('get_profile_list', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
    @client_api('command', True)
    def wait_on_traffic(self, timeout = None, profile_id = None):
        """
        Block until traffic stops

        :parameters:
            timeout: int
                Timeout in seconds
                Default is blocking

            profile_id: string
                Profile ID

        :raises:
            + :exc:`TRexTimeoutError` - in case timeout has expired
            + :exc:`TRexError`
        """
        if profile_id is None:
            # no profile given - wait for all ports via the base client
            ports = self.get_all_ports()
            TRexClient.wait_on_traffic(self, ports, timeout)
        else:
            # a stopped profile lands back in LOADED state
            self.wait_for_profile_state(profile_id, self.STATE_ASTF_LOADED, timeout)

    # get stats
    @client_api('getter', True)
    def get_stats(self, ports = None, sync_now = True, skip_zero = True, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
        """
        Gets all statistics on given ports, traffic and latency.

        :parameters:
            ports: list

            sync_now: boolean

            skip_zero: boolean

            pid_input: string
                Input profile ID

            is_sum: boolean
                Get total counter values
        """
        # common port stats, extended with ASTF traffic + latency sections
        stats = self._get_stats_common(ports, sync_now)
        stats['traffic'] = self.get_traffic_stats(skip_zero, pid_input, is_sum = is_sum)
        stats['latency'] = self.get_latency_stats(skip_zero)

        return stats
    # clear stats
    @client_api('getter', True)
    def clear_stats(self,
                    ports = None,
                    clear_global = True,
                    clear_xstats = True,
                    clear_traffic = True,
                    pid_input = DEFAULT_PROFILE_ID):
        """
        Clears statistics in given ports.

        :parameters:
            ports: list

            clear_global: boolean

            clear_xstats: boolean

            clear_traffic: boolean

            pid_input: string
                Input profile ID
        """
        valid_pids = self.validate_profile_id_input(pid_input)
        for profile_id in valid_pids:
            if clear_traffic:
                self.clear_traffic_stats(profile_id)
        # also clear the aggregated (sum) counters
        self.clear_traffic_stats(is_sum = True)

        return self._clear_stats_common(ports, clear_global, clear_xstats)
    @client_api('getter', True)
    def get_tg_names(self, pid_input = DEFAULT_PROFILE_ID):
        """
        Returns a list of the names of all template groups defined in the current profile.

        :parameters:
            pid_input: string
                Input profile ID

        :raises:
            + :exc:`TRexError`
        """
        # delegated to the traffic-stats model, which caches TG names
        return self.traffic_stats.get_tg_names(pid_input)

    @client_api('getter', True)
    def get_traffic_tg_stats(self, tg_names, skip_zero=True, pid_input = DEFAULT_PROFILE_ID):
        """
        Returns the traffic statistics for the template groups specified in tg_names.

        :parameters:
            tg_names: list or string
                Contains the names of the template groups for which we want to get traffic statistics.

            skip_zero: boolean

            pid_input: string
                Input profile ID

        :raises:
            + :exc:`TRexError`
            + :exc:`ASTFErrorBadTG`
                Can be thrown if tg_names is empty or contains a invalid name.
        """
        validate_type('tg_names', tg_names, (list, basestring))
        return self.traffic_stats.get_traffic_tg_stats(tg_names, skip_zero, pid_input = pid_input)

    @client_api('getter', True)
    def get_traffic_stats(self, skip_zero = True, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
        """
        Returns aggregated traffic statistics.

        :parameters:
            skip_zero: boolean

            pid_input: string
                Input profile ID

            is_sum: boolean
                Get total counter values
        """
        return self.traffic_stats.get_stats(skip_zero, pid_input = pid_input, is_sum = is_sum)
@client_api('getter', True)
def get_profiles_state(self):
"""
Gets an dictionary with the states of all the profiles.
:returns:
Dictionary containing profiles and their states. Keys are strings, `pid` (profile ID). Each profile can be in one of the following states:
['STATE_IDLE', 'STATE_ASTF_LOADED', 'STATE_ASTF_PARSE', 'STATE_ASTF_BUILD', 'STATE_TX', 'STATE_ASTF_CLEANUP', 'STATE_ASTF_DELETE', 'STATE_UNKNOWN'].
"""
states = {}
for key, value in self.astf_profile_state.items():
states[key] = astf_states[value] if value else "STATE_UNKNOWN"
return states
    @client_api('getter', True)
    def is_traffic_stats_error(self, stats):
        '''
        Return Tuple if there is an error and what is the error (Bool,Errors)

        :parameters:
            stats: dict from get_traffic_stats output

        '''
        return self.traffic_stats.is_traffic_stats_error(stats)

    @client_api('getter', True)
    def clear_traffic_stats(self, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
        """
        Clears traffic statistics.

        :parameters:
            pid_input: string
                Input profile ID
        """
        # is_sum=True clears the aggregated counters instead of one profile's
        return self.traffic_stats.clear_stats(pid_input, is_sum)

    @client_api('getter', True)
    def get_latency_stats(self,skip_zero =True):
        """
        Gets latency statistics.

        :parameters:
            skip_zero: boolean
        """
        return self.latency_stats.get_stats(skip_zero)
    @client_api('command', True)
    def start_latency(self, mult = 1, src_ipv4="16.0.0.1", dst_ipv4="48.0.0.1", ports_mask=0x7fffffff, dual_ipv4 = "1.0.0.0"):
        '''
        Start ICMP latency traffic.

        :parameters:
            mult: float
                number of packets per second

            src_ipv4: IP string
                IPv4 source address for the port

            dst_ipv4: IP string
                IPv4 destination address

            ports_mask: uint32_t
                bitmask of ports

            dual_ipv4: IP string
                IPv4 address to be added for each pair of ports (starting from second pair)

        .. note::
            VLAN will be taken from interface configuration

        :raises:
            + :exc:`TRexError`
        '''
        # validate all addresses before sending the RPC
        if not is_valid_ipv4(src_ipv4):
            raise TRexError("src_ipv4 is not a valid IPv4 address: '{0}'".format(src_ipv4))

        if not is_valid_ipv4(dst_ipv4):
            raise TRexError("dst_ipv4 is not a valid IPv4 address: '{0}'".format(dst_ipv4))

        if not is_valid_ipv4(dual_ipv4):
            raise TRexError("dual_ipv4 is not a valid IPv4 address: '{0}'".format(dual_ipv4))

        params = {
            'handler': self.handler,
            'mult': mult,
            'src_addr': src_ipv4,
            'dst_addr': dst_ipv4,
            'dual_port_addr': dual_ipv4,
            'mask': ports_mask,
        }

        self.ctx.logger.pre_cmd('Starting latency traffic.')
        rc = self._transmit("start_latency", params = params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError(rc.err())
    @client_api('command', True)
    def stop_latency(self):
        '''
        Stop latency traffic.
        '''
        params = {
            'handler': self.handler
        }

        self.ctx.logger.pre_cmd('Stopping latency traffic.')
        rc = self._transmit("stop_latency", params = params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError(rc.err())

    @client_api('command', True)
    def update_latency(self, mult = 1):
        '''
        Update rate of latency traffic.

        :parameters:
            mult: float
                number of packets per second

        :raises:
            + :exc:`TRexError`
        '''
        params = {
            'handler': self.handler,
            'mult': mult,
        }

        self.ctx.logger.pre_cmd('Updating latency rate.')
        rc = self._transmit("update_latency", params = params)
        self.ctx.logger.post_cmd(rc)
        if not rc:
            raise TRexError(rc.err())
    @client_api('command', True)
    def topo_load(self, topology, tunables = {}):
        ''' Load network topology

        :parameters:
            topology: string or ASTFTopology
                | Path to topology filename or topology object
                | Supported file formats:
                | * JSON
                | * YAML
                | * Python

            tunables: dict
                forward those key-value pairs to the topology Python file

        :raises:
            + :exc:`TRexError`
        '''
        self.topo_mngr.load(topology, **tunables)
        print('')

    @client_api('command', True)
    def topo_clear(self):
        ''' Clear network topology '''
        self.topo_mngr.clear()

    @client_api('command', True)
    def topo_resolve(self, ports = None):
        ''' Resolve current network topology. On success, upload to server '''
        self.topo_mngr.resolve(ports)

    @client_api('command', False)
    def topo_show(self, ports = None):
        ''' Show current network topology status '''
        self.topo_mngr.show(ports)
        print('')
    @client_api('command', False)
    def topo_save(self, filename):
        '''
        Save current topology to file

        :parameters:
            filename: string
                | Path to topology filename, supported formats:
                | * JSON
                | * YAML
                | * Python
        '''

        if os.path.exists(filename):
            # refuse to overwrite anything that is not a regular file
            if os.path.islink(filename) or not os.path.isfile(filename):
                raise TRexError("Given path exists and it's not a file!")
            # interactive confirmation before overwriting an existing file
            sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
            ans = user_input().strip()
            if ans.lower() not in ('y', 'yes'):
                print('Not saving.')
                return

        try:
            # output format is chosen by file extension
            if filename.endswith('.json'):
                self.ctx.logger.pre_cmd('Saving topology to JSON: %s' % filename)
                code = self.topo_mngr.to_json(False)
            elif filename.endswith('.yaml'):
                self.ctx.logger.pre_cmd('Saving topology to YAML: %s' % filename)
                code = self.topo_mngr.to_yaml()
            elif filename.endswith('.py'):
                self.ctx.logger.pre_cmd('Saving topology to Python script: %s' % filename)
                code = self.topo_mngr.to_code()
            else:
                self.ctx.logger.error('Saved filename should be .py or .json or .yaml')
                return
            with open(filename, 'w') as f:
                f.write(code)
        except Exception as e:
            self.ctx.logger.post_cmd(False)
            raise TRexError('Saving file failed: %s' % e)

        self.ctx.logger.post_cmd(True)
# private function to form json data for GTP tunnel
def _update_gtp_tunnel(self, client_list):
json_attr = []
for key, value in client_list.items():
json_attr.append({'client_ip' : key, 'sip': value.sip, 'dip' : value.dip, 'teid' : value.teid, "version" :value.version})
return json_attr
    # execute 'method' for inserting/updating tunnel info for clients
    def update_tunnel_client_record (self, client_list, tunnel_type):
        # dispatch on tunnel type; only GTP is implemented so far
        json_attr = []
        if tunnel_type == TunnelType.GTP:
            json_attr = self._update_gtp_tunnel(client_list)
        else:
            raise TRexError('Invalid Tunnel Type: %d' % tunnel_type)
        params = {"tunnel_type": tunnel_type,
                  "attr": json_attr }
        return self._transmit("update_tunnel_client", params)

    # execute 'method' for Making a client active/inactive
    def set_client_enable(self, client_list, is_enable):
        '''
           Version: 1
           API to toggle state of client
           Input: List of clients and Action : state flag
        '''
        json_attr = []
        for key in client_list:
            json_attr.append({'client_ip' : key})
        params = {"is_enable": is_enable,
                  "is_range": False,
                  "attr": json_attr }
        return self._transmit("enable_disable_client", params)

    # execute 'method' for Making a client active/inactive
    def set_client_enable_range(self, client_start, client_end, is_enable):
        '''
           Version: 2
           API to toggle state of client
           Input: Client range and Action : state flag
        '''
        json_attr = []
        json_attr.append({'client_start_ip' : client_start, 'client_end_ip' : client_end})
        params = {"is_enable": is_enable,
                  "is_range": True,
                  "attr": json_attr }
        return self._transmit("enable_disable_client", params)

    # execute 'method' for getting clients stats
    def get_clients_info (self, client_list):
        '''
           Version 1
           API to get client information: Currently only state and if client is present.
           Input: List of clients
        '''
        json_attr = []
        for key in client_list:
            json_attr.append({'client_ip' : key})
        params = {"is_range": False,
                  "attr": json_attr }
        return self._transmit("get_clients_info", params)

    # execute 'method' for getting clients stats
    def get_clients_info_range (self, client_start, client_end):
        '''
           Version 2
           API to get client information: Currently only state and if client is present.
           Input: Client range
        '''
        json_attr = []
        json_attr.append({'client_start_ip' : client_start, 'client_end_ip' : client_end})
        params = {"is_range": True,
                  "attr": json_attr }
        return self._transmit("get_clients_info", params)
############################ console #############################
############################ commands #############################
############################ #############################
    @console_api('acquire', 'common', True)
    def acquire_line (self, line):
        '''Acquire ports\n'''

        # define a parser
        parser = parsing_opts.gen_parser(
            self,
            'acquire',
            self.acquire_line.__doc__,
            parsing_opts.FORCE)

        opts = parser.parse_args(shlex.split(line))
        self.acquire(force = opts.force)
        return True

    @console_api('reset', 'common', True)
    def reset_line(self, line):
        '''Reset ports'''

        parser = parsing_opts.gen_parser(
            self,
            'reset',
            self.reset_line.__doc__,
            parsing_opts.PORT_RESTART
            )

        opts = parser.parse_args(shlex.split(line))
        self.reset(restart = opts.restart)
        return True
@console_api('start', 'ASTF', True)
def start_line(self, line):
    '''Start traffic command'''

    # parse tunables with the previous form. (-t var1=x1,var2=x2..)
    def parse_tunables_old_version(tunables_parameters):
        parser = parsing_opts.gen_parser(self,
                                         "start",
                                         self.start_line.__doc__,
                                         parsing_opts.TUNABLES)
        args = parser.parse_args(tunables_parameters.split())
        return args.tunables

    # parser for parsing the start command arguments
    parser = parsing_opts.gen_parser(self,
                                     'start',
                                     self.start_line.__doc__,
                                     parsing_opts.FILE_PATH,
                                     parsing_opts.MULTIPLIER_NUM,
                                     parsing_opts.DURATION,
                                     parsing_opts.ARGPARSE_TUNABLES,
                                     parsing_opts.ASTF_NC,
                                     parsing_opts.ASTF_LATENCY,
                                     parsing_opts.ASTF_IPV6,
                                     parsing_opts.ASTF_CLIENT_CTRL,
                                     parsing_opts.ASTF_PROFILE_LIST
                                     )

    opts = parser.parse_args(shlex.split(line))
    help_flags = ('-h', '--help')

    # if the user chose to pass the tunables arguments in previous version (-t var1=x1,var2=x2..)
    # we decode the tunables and then convert the output from dictionary to list in order to have the same format with the
    # newer version.
    tunable_dict = {}
    if "-t" in line and '=' in line:
        # everything after "-t" (minus any help flags) is the legacy tunable string
        tunable_parameter = "-t " + line.split("-t")[1].strip("-h").strip("--help").strip()
        tunable_dict = parse_tunables_old_version(tunable_parameter)
        tunable_list = []
        # converting from tunables dictionary to list
        for tunable_key in tunable_dict:
            tunable_list.extend(["--{}".format(tunable_key), str(tunable_dict[tunable_key])])
        # preserve an explicit help request through the conversion
        if any(h in opts.tunables for h in help_flags):
            tunable_list.append("--help")
        opts.tunables = tunable_list

    # the (possibly converted) tunables list rides along in the dict handed to load_profile
    tunable_dict["tunables"] = opts.tunables

    valid_pids = self.validate_profile_id_input(opts.profiles, start = True)
    for profile_id in valid_pids:
        # load then start each requested profile in turn
        self.load_profile(opts.file[0], tunable_dict, pid_input = profile_id)
        #when ".. -t --help", is called the help message is being printed once and then it returns to the console
        if any(h in opts.tunables for h in help_flags):
            break

        kw = {}
        if opts.clients:
            for client in opts.clients:
                if client not in self.ports:
                    raise TRexError('Invalid client interface: %d' % client)
                # odd-numbered interfaces are server side by convention here
                if client & 1:
                    raise TRexError('Following interface is not client: %d' % client)
            kw['client_mask'] = self._calc_port_mask(opts.clients)
        elif opts.servers_only:
            kw['client_mask'] = 0

        # non-blocking start (block=False) for this profile
        self.start(opts.mult, opts.duration, opts.nc, False, opts.latency_pps, opts.ipv6, pid_input = profile_id, **kw)

    return True
@console_api('stop', 'ASTF', True)
def stop_line(self, line):
    '''Stop traffic command'''
    # build the argument parser for the 'stop' console command
    cmd_parser = parsing_opts.gen_parser(self,
                                         'stop',
                                         self.stop_line.__doc__,
                                         parsing_opts.ASTF_PROFILE_DEFAULT_LIST,
                                         parsing_opts.REMOVE)
    parsed = cmd_parser.parse_args(shlex.split(line))
    # non-blocking stop; optionally removes the profile(s) afterwards
    self.stop(False, pid_input = parsed.profiles, is_remove = parsed.remove)
@console_api('update', 'ASTF', True)
def update_line(self, line):
    '''Update traffic multiplier'''
    # build the argument parser for the 'update' console command
    cmd_parser = parsing_opts.gen_parser(self,
                                         'update',
                                         self.update_line.__doc__,
                                         parsing_opts.MULTIPLIER_NUM,
                                         parsing_opts.ASTF_PROFILE_DEFAULT_LIST)
    parsed = cmd_parser.parse_args(shlex.split(line))
    self.update(parsed.mult, pid_input = parsed.profiles)
@console_api('service', 'ASTF', True)
def service_line (self, line):
    '''Configures port for service mode.
    In service mode ports will reply to ARP, PING
    and etc.
    In ASTF, command will apply on all ports.
    '''
    # build the argument parser for the 'service' console command
    cmd_parser = parsing_opts.gen_parser(self,
                                         "service",
                                         self.service_line.__doc__,
                                         parsing_opts.SERVICE_GROUP)
    parsed = cmd_parser.parse_args(line.split())
    enabled, filtered, mask = self._get_service_params(parsed)
    # ASTF needs TCP/UDP traffic to keep flowing, so that bit may not be cleared
    if mask is not None and ((mask & NO_TCP_UDP_MASK) == 0):
        raise TRexError('Cannot set NO_TCP_UDP off in ASTF!')
    self.set_service_mode(enabled = enabled, filtered = filtered, mask = mask)
    return True
@staticmethod
def _calc_port_mask(ports):
mask =0
for p in ports:
mask += (1<<p)
return mask
@console_api('latency', 'ASTF', True)
def latency_line(self, line):
    '''Latency-related commands'''
    parser = parsing_opts.gen_parser(
        self,
        'latency',
        self.latency_line.__doc__)

    # helper: one sub-command parser per latency action
    def latency_add_parsers(subparsers, cmd, help = '', **k):
        return subparsers.add_parser(cmd, description = help, help = help, **k)

    subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
    start_parser = latency_add_parsers(subparsers, 'start', help = 'Start latency traffic')
    latency_add_parsers(subparsers, 'stop', help = 'Stop latency traffic')
    update_parser = latency_add_parsers(subparsers, 'update', help = 'Update rate of running latency')
    latency_add_parsers(subparsers, 'show', help = 'alias for stats -l')
    latency_add_parsers(subparsers, 'hist', help = 'alias for stats --lh')
    latency_add_parsers(subparsers, 'counters', help = 'alias for stats --lc')

    # only 'start' and 'update' take additional arguments
    start_parser.add_arg_list(
        parsing_opts.MULTIPLIER_NUM,
        parsing_opts.SRC_IPV4,
        parsing_opts.DST_IPV4,
        parsing_opts.PORT_LIST,
        parsing_opts.DUAL_IPV4
        )
    update_parser.add_arg_list(
        parsing_opts.MULTIPLIER_NUM,
        )

    opts = parser.parse_args(shlex.split(line))

    # dispatch on the chosen sub-command; no sub-command falls through to 'show'
    if opts.command == 'start':
        ports_mask = self._calc_port_mask(opts.ports)
        self.start_latency(opts.mult, opts.src_ipv4, opts.dst_ipv4, ports_mask, opts.dual_ip)
    elif opts.command == 'stop':
        self.stop_latency()
    elif opts.command == 'update':
        self.update_latency(mult = opts.mult)
    elif opts.command == 'show' or not opts.command:
        self._show_latency_stats()
    elif opts.command == 'hist':
        self._show_latency_histogram()
    elif opts.command == 'counters':
        self._show_latency_counters()
    else:
        raise TRexError('Unhandled command %s' % opts.command)
    return True
@console_api('topo', 'ASTF', True, True)
def topo_line(self, line):
    '''Topology-related commands'''
    parser = parsing_opts.gen_parser(
        self,
        'topo',
        self.topo_line.__doc__)

    # helper: one sub-command parser per topology action
    def topology_add_parsers(subparsers, cmd, help = '', **k):
        return subparsers.add_parser(cmd, description = help, help = help, **k)

    subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
    load_parser = topology_add_parsers(subparsers, 'load', help = 'Load topology from file')
    reso_parser = topology_add_parsers(subparsers, 'resolve', help = 'Resolve loaded topology, push to server on success')
    show_parser = topology_add_parsers(subparsers, 'show', help = 'Show current topology status')
    topology_add_parsers(subparsers, 'clear', help = 'Clear current topology')
    save_parser = topology_add_parsers(subparsers, 'save', help = 'Save topology to file')

    load_parser.add_arg_list(
        parsing_opts.FILE_PATH,
        parsing_opts.TUNABLES,
        )
    reso_parser.add_arg_list(
        parsing_opts.PORT_LIST_NO_DEFAULT,
        )
    show_parser.add_arg_list(
        parsing_opts.PORT_LIST_NO_DEFAULT,
        )
    save_parser.add_arg_list(
        parsing_opts.FILE_PATH_NO_CHECK,
        )

    opts = parser.parse_args(shlex.split(line))

    # dispatch on the chosen sub-command; no sub-command falls through to 'show'
    if opts.command == 'load':
        self.topo_load(opts.file[0], opts.tunables)
        # NOTE(review): 'load' and 'show' return False while other commands
        # return True -- presumably a signal to the console; confirm against
        # the console_api handling before relying on it.
        return False
    elif opts.command == 'resolve':
        self.topo_resolve(opts.ports_no_default)
    elif opts.command == 'show' or not opts.command:
        self.topo_show(opts.ports_no_default)
        return False
    elif opts.command == 'clear':
        self.topo_clear()
    elif opts.command == 'save':
        self.topo_save(opts.file[0])
    else:
        raise TRexError('Unhandled command %s' % opts.command)
    return True
@console_api('clear', 'common', False)
def clear_stats_line (self, line):
    '''Clear cached local statistics\n'''
    # build the argument parser for the 'clear' console command
    cmd_parser = parsing_opts.gen_parser(self,
                                         "clear",
                                         self.clear_stats_line.__doc__,
                                         parsing_opts.PORT_LIST_WITH_ALL)
    parsed = cmd_parser.parse_args(line.split())
    # clear statistics of every profile on the selected ports
    self.clear_stats(parsed.ports, pid_input = ALL_PROFILE_ID)
    return RC_OK()
@console_api('stats', 'common', True)
def show_stats_line (self, line):
    '''Show various statistics\n'''
    # define a parser
    parser = parsing_opts.gen_parser(
        self,
        'stats',
        self.show_stats_line.__doc__,
        parsing_opts.PORT_LIST,
        parsing_opts.ASTF_STATS_GROUP,
        parsing_opts.ASTF_PROFILE_STATS)

    # NOTE(review): this initial valid_pids value is always overwritten below
    # before first use; the call is kept in case get_profiles_state() matters
    # for its client_api side effects -- confirm before removing.
    astf_profiles_state = self.get_profiles_state()
    valid_pids = list(astf_profiles_state.keys())

    opts = parser.parse_args(shlex.split(line))
    if not opts:
        return

    # without parameters show only global and ports
    if not opts.stats:
        self._show_global_stats()
        self._show_port_stats(opts.ports)
        return

    # dynamic-profile mode with no explicit profile name: show summed stats
    # over the default profile; otherwise show the requested profile(s).
    # (PEP 8: truthiness instead of '== True', 'is None' instead of '== None')
    if self.is_dynamic and opts.pfname is None:
        is_sum = True
        valid_pids = self.validate_profile_id_input(pid_input = DEFAULT_PROFILE_ID)
    else:
        is_sum = False
        valid_pids = self.validate_profile_id_input(pid_input = opts.pfname)

    # decode which stats to show
    if opts.stats == 'global':
        self._show_global_stats()
    elif opts.stats == 'ports':
        self._show_port_stats(opts.ports)
    elif opts.stats == 'xstats':
        self._show_port_xstats(opts.ports, False)
    elif opts.stats == 'xstats_inc_zero':
        self._show_port_xstats(opts.ports, True)
    elif opts.stats == 'status':
        self._show_port_status(opts.ports)
    elif opts.stats == 'cpu':
        self._show_cpu_util()
    elif opts.stats == 'mbuf':
        self._show_mbuf_util()
    elif opts.stats == 'astf':
        for profile_id in valid_pids:
            self._show_traffic_stats(False, pid_input = profile_id, is_sum = is_sum)
    elif opts.stats == 'astf_inc_zero':
        for profile_id in valid_pids:
            self._show_traffic_stats(True, pid_input = profile_id, is_sum = is_sum)
    elif opts.stats == 'latency':
        self._show_latency_stats()
    elif opts.stats == 'latency_histogram':
        self._show_latency_histogram()
    elif opts.stats == 'latency_counters':
        self._show_latency_counters()
    else:
        raise TRexError('Unhandled stat: %s' % opts.stats)
@console_api('template_group', 'ASTF', True)
def template_group_line(self, line):
    "Template group commands"
    parser = parsing_opts.gen_parser(
        self,
        'template_group',
        self.template_group_line.__doc__
    )

    # helper: one sub-command parser per template-group action
    def template_group_add_parsers(subparsers, cmd, help = '', **k):
        return subparsers.add_parser(cmd, description = help, help = help, **k)

    subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
    names_parser = template_group_add_parsers(subparsers, 'names', help = 'Get template group names')
    stats_parser = template_group_add_parsers(subparsers, 'stats', help = 'Get stats for template group')
    names_parser.add_arg_list(parsing_opts.TG_NAME_START)
    names_parser.add_arg_list(parsing_opts.TG_NAME_AMOUNT)
    names_parser.add_arg_list(parsing_opts.ASTF_PROFILE_LIST)
    stats_parser.add_arg_list(parsing_opts.TG_STATS)
    stats_parser.add_arg_list(parsing_opts.ASTF_PROFILE_LIST)

    opts = parser.parse_args(shlex.split(line))
    if not opts:
        return

    # run the chosen sub-command once per validated profile id
    pid_input = opts.profiles
    valid_pids = self.validate_profile_id_input(pid_input)
    for profile_id in valid_pids:
        if opts.command == 'names':
            self.traffic_stats._show_tg_names(start=opts.start, amount=opts.amount, pid_input = profile_id)
        elif opts.command == 'stats':
            try:
                # refresh the name list first so name->id translation is current
                self.get_tg_names(profile_id)
                tgid = self.traffic_stats._translate_names_to_ids(opts.name, pid_input = profile_id)
                self._show_traffic_stats(include_zero_lines=False, tgid = tgid[0], pid_input = profile_id)
            except ASTFErrorBadTG:
                print(format_text("Template group name %s doesn't exist!" % opts.name, 'bold'))
        else:
            raise TRexError('Unhandled command: %s' % opts.command)
def _get_num_of_tgids(self, pid_input = DEFAULT_PROFILE_ID):
    # Delegates to the traffic-stats object: number of template group IDs
    # known for the given profile id.
    return self.traffic_stats._get_num_of_tgids(pid_input)
def _show_traffic_stats(self, include_zero_lines, buffer = sys.stdout, tgid = 0, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
    '''Render the traffic statistics table for one profile / template group to *buffer*.'''
    stats_table = self.traffic_stats.to_table(include_zero_lines, tgid, pid_input, is_sum = is_sum)
    text_tables.print_table_with_header(stats_table, untouched_header = stats_table.title, buffer = buffer)
def _show_latency_stats(self, buffer = sys.stdout):
    '''Render the main latency statistics table to *buffer*.'''
    stats_table = self.latency_stats.to_table_main()
    text_tables.print_table_with_header(stats_table, untouched_header = stats_table.title, buffer = buffer)
def _show_latency_histogram(self, buffer = sys.stdout):
    '''Render the latency histogram table to *buffer*.'''
    histogram_table = self.latency_stats.histogram_to_table()
    text_tables.print_table_with_header(histogram_table, untouched_header = histogram_table.title, buffer = buffer)
def _show_latency_counters(self, buffer = sys.stdout):
    '''Render the latency counters table to *buffer*.'''
    counters_table = self.latency_stats.counters_to_table()
    text_tables.print_table_with_header(counters_table, untouched_header = counters_table.title, buffer = buffer)
def _show_profiles_states(self, buffer = sys.stdout):
    '''Build and return a two-column table of profile IDs and their states.

    NOTE(review): despite the name, this only builds the table (the caller
    prints it) and the *buffer* argument is currently unused.
    '''
    state_table = text_tables.TRexTextTable()
    state_table.set_cols_align(["c", "c"])
    state_table.set_cols_width([20, 20])
    state_table.header(["ID", "State"])

    # refresh client state before reading the profile map
    self.sync()
    for profile_id, profile_state in sorted(self.get_profiles_state().items()):
        state_table.add_row([profile_id, profile_state])
    return state_table
@console_api('profiles', 'ASTF', True, True)
def profiles_line(self, line):
    '''Get loaded to profiles information'''
    # build the argument parser for the 'profiles' console command
    cmd_parser = parsing_opts.gen_parser(self,
                                         "profiles",
                                         self.profiles_line.__doc__)
    parsed = cmd_parser.parse_args(line.split())
    if not parsed:
        return parsed
    state_table = self._show_profiles_states()
    if not state_table:
        self.logger.info(format_text("No profiles found with desired filter.\n", "bold", "magenta"))
    text_tables.print_table_with_header(state_table, header = 'Profile states')
| 35.0246 | 169 | 0.560459 | from __future__ import print_function
import hashlib
import sys
import time
import os
import shlex
from ..utils.common import get_current_user, user_input, PassiveTimer
from ..utils import parsing_opts, text_tables
from ..common.trex_api_annotators import client_api, console_api
from ..common.trex_client import TRexClient, NO_TCP_UDP_MASK
from ..common.trex_events import Event
from ..common.trex_exceptions import TRexError, TRexTimeoutError
from ..common.trex_types import *
from ..common.trex_types import DEFAULT_PROFILE_ID, ALL_PROFILE_ID
from .trex_astf_port import ASTFPort
from .trex_astf_profile import ASTFProfile
from .topo import ASTFTopologyManager
from .stats.traffic import CAstfTrafficStats
from .stats.latency import CAstfLatencyStats
from ..utils.common import is_valid_ipv4, is_valid_ipv6
from ..utils.text_opts import format_text
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
astf_states = [
'STATE_IDLE',
'STATE_ASTF_LOADED',
'STATE_ASTF_PARSE',
'STATE_ASTF_BUILD',
'STATE_TX',
'STATE_ASTF_CLEANUP',
'STATE_ASTF_DELETE']
class TunnelType:
NONE = 0
GTP = 1
class ASTFClient(TRexClient):
port_states = [getattr(ASTFPort, state, 0) for state in astf_states]
def __init__(self,
username = get_current_user(),
server = "localhost",
sync_port = 4501,
async_port = 4500,
verbose_level = "error",
logger = None,
sync_timeout = None,
async_timeout = None):
api_ver = {'name': 'ASTF', 'major': 2, 'minor': 0}
TRexClient.__init__(self,
api_ver,
username,
server,
sync_port,
async_port,
verbose_level,
logger,
sync_timeout,
async_timeout)
self.handler = ''
self.traffic_stats = CAstfTrafficStats(self.conn.rpc)
self.latency_stats = CAstfLatencyStats(self.conn.rpc)
self.topo_mngr = ASTFTopologyManager(self)
self.sync_waiting = False
self.last_error = ''
self.last_profile_error = {}
self.epoch = None
self.state = None
for index, state in enumerate(astf_states):
setattr(self, state, index)
self.transient_states = [
self.STATE_ASTF_PARSE,
self.STATE_ASTF_BUILD,
self.STATE_ASTF_CLEANUP,
self.STATE_ASTF_DELETE]
self.astf_profile_state = {'_': 0}
def get_mode(self):
return "ASTF"
= state
def _get_profile_state(self, profile_id):
return self.astf_profile_state.get(profile_id, self.STATE_IDLE) if self.is_dynamic else self.state
def _transmit_async(self, rpc_func, ok_states, bad_states = None, ready_state = None, **k):
profile_id = k['params']['profile_id']
ok_states = listify(ok_states)
if bad_states is not None:
bad_states = listify(bad_states)
self.wait_for_steady()
if rpc_func == 'start' and self.state is not self.STATE_TX:
self.inc_epoch()
self.sync_waiting = True
try:
if ready_state:
assert ready_state not in self.transient_states
if self._get_profile_state(profile_id) != ready_state:
self.wait_for_profile_state(profile_id, ready_state)
else:
self.wait_for_steady(profile_id)
rc = self._transmit(rpc_func, **k)
if not rc:
return rc
timer = PassiveTimer()
while True:
state = self._get_profile_state(profile_id)
if state in ok_states:
return RC_OK()
if ready_state and state in self.transient_states:
ready_state = None
if self.last_profile_error.get(profile_id) or (not ready_state and bad_states and state in bad_states):
error = self.last_profile_error.pop(profile_id, None)
general_error = 'Unknown error, state: {}, profile: {}'.format(state, profile_id)
return RC_ERR(error or general_error)
if timer.has_elapsed(0.2):
self.sync()
else:
time.sleep(0.001)
finally:
self.sync_waiting = False
def check_states(self, ok_states):
cnt = 0
while True:
if self.state in ok_states:
break
cnt = cnt + 1
if cnt % 10 == 0:
self.sync()
else:
time.sleep(0.1)
self.sync()
def _is_service_req(self):
return False
dual_ip, seq_split):
if not is_valid_ipv4(start_ip):
raise TRexError("start_ip is not a valid IPv4 address: '%s'" % start_ip)
if not is_valid_ipv4(end_ip):
raise TRexError("end_ip is not a valid IPv4 address: '%s'" % end_ip)
if not is_valid_ipv4(dual_ip):
raise TRexError("dual_ip is not a valid IPv4 address: '%s'" % dual_ip)
params = {
'start_ip': start_ip,
'end_ip': end_ip,
'dual_ip': dual_ip,
'seq_split': seq_split,
}
rc = self._transmit('get_traffic_dist', params = params)
if not rc:
raise TRexError(rc.err())
res = {}
for port_id, port_data in rc.data().items():
core_dict = {}
for core_id, core_data in port_data.items():
core_dict[int(core_id)] = core_data
res[int(port_id)] = core_dict
return res
@client_api('command', True)
def clear_profile(self, block = True, pid_input = DEFAULT_PROFILE_ID):
ok_states = [self.STATE_IDLE, self.STATE_ASTF_LOADED]
valid_pids = self.validate_profile_id_input(pid_input)
for profile_id in valid_pids:
profile_state = self.astf_profile_state.get(profile_id)
if profile_state in ok_states:
params = {
'handler': self.handler,
'profile_id': profile_id
}
self.ctx.logger.pre_cmd('Clearing loaded profile.')
if block:
rc = self._transmit_async('profile_clear', params = params, ok_states = self.STATE_IDLE)
else:
rc = self._transmit('profile_clear', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
else:
self.logger.info(format_text("Cannot remove a profile: %s is not state IDLE and state LOADED.\n" % profile_id, "bold", "magenta"))
@client_api('command', True)
def start(self, mult = 1, duration = -1, nc = False, block = True, latency_pps = 0, ipv6 = False, pid_input = DEFAULT_PROFILE_ID, client_mask = 0xffffffff):
params = {
'handler': self.handler,
'profile_id': pid_input,
'mult': mult,
'nc': nc,
'duration': duration,
'latency_pps': latency_pps,
'ipv6': ipv6,
'client_mask': client_mask,
}
self.ctx.logger.pre_cmd('Starting traffic.')
valid_pids = self.validate_profile_id_input(pid_input, start = True)
for profile_id in valid_pids:
if block:
rc = self._transmit_async('start', params = params, ok_states = self.STATE_TX, bad_states = self.STATE_ASTF_LOADED, ready_state = self.STATE_ASTF_LOADED)
else:
rc = self._transmit('start', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def stop(self, block = True, pid_input = DEFAULT_PROFILE_ID, is_remove = False):
valid_pids = self.validate_profile_id_input(pid_input)
for profile_id in valid_pids:
profile_state = self.astf_profile_state.get(profile_id)
if profile_state in {self.STATE_ASTF_PARSE, self.STATE_ASTF_BUILD}:
self.wait_for_profile_state(profile_id, self.STATE_TX)
profile_state = self.astf_profile_state.get(profile_id)
if profile_state is self.STATE_TX:
params = {
'handler': self.handler,
'profile_id': profile_id
}
self.ctx.logger.pre_cmd('Stopping traffic.')
if block or is_remove:
rc = self._transmit_async('stop', params = params, ok_states = [self.STATE_IDLE, self.STATE_ASTF_LOADED])
else:
rc = self._transmit('stop', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
profile_state = self.astf_profile_state.get(profile_id)
if is_remove:
if profile_state is self.STATE_ASTF_CLEANUP:
self.wait_for_profile_state(profile_id, self.STATE_ASTF_LOADED)
self.clear_profile(block = block, pid_input = profile_id)
@client_api('command', True)
def update(self, mult, pid_input = DEFAULT_PROFILE_ID):
valid_pids = self.validate_profile_id_input(pid_input)
for profile_id in valid_pids:
params = {
'handler': self.handler,
'profile_id': profile_id,
'mult': mult,
}
self.ctx.logger.pre_cmd('Updating traffic.')
rc = self._transmit('update', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def get_profiles(self):
params = {
'handler': self.handler,
}
self.ctx.logger.pre_cmd('Getting profile list.')
rc = self._transmit('get_profile_list', params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def wait_on_traffic(self, timeout = None, profile_id = None):
if profile_id is None:
ports = self.get_all_ports()
TRexClient.wait_on_traffic(self, ports, timeout)
else:
self.wait_for_profile_state(profile_id, self.STATE_ASTF_LOADED, timeout)
@client_api('getter', True)
def get_stats(self, ports = None, sync_now = True, skip_zero = True, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
stats = self._get_stats_common(ports, sync_now)
stats['traffic'] = self.get_traffic_stats(skip_zero, pid_input, is_sum = is_sum)
stats['latency'] = self.get_latency_stats(skip_zero)
return stats
@client_api('getter', True)
def clear_stats(self,
ports = None,
clear_global = True,
clear_xstats = True,
clear_traffic = True,
pid_input = DEFAULT_PROFILE_ID):
valid_pids = self.validate_profile_id_input(pid_input)
for profile_id in valid_pids:
if clear_traffic:
self.clear_traffic_stats(profile_id)
self.clear_traffic_stats(is_sum = True)
return self._clear_stats_common(ports, clear_global, clear_xstats)
@client_api('getter', True)
def get_tg_names(self, pid_input = DEFAULT_PROFILE_ID):
return self.traffic_stats.get_tg_names(pid_input)
@client_api('getter', True)
def get_traffic_tg_stats(self, tg_names, skip_zero=True, pid_input = DEFAULT_PROFILE_ID):
validate_type('tg_names', tg_names, (list, basestring))
return self.traffic_stats.get_traffic_tg_stats(tg_names, skip_zero, pid_input = pid_input)
@client_api('getter', True)
def get_traffic_stats(self, skip_zero = True, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
return self.traffic_stats.get_stats(skip_zero, pid_input = pid_input, is_sum = is_sum)
@client_api('getter', True)
def get_profiles_state(self):
states = {}
for key, value in self.astf_profile_state.items():
states[key] = astf_states[value] if value else "STATE_UNKNOWN"
return states
@client_api('getter', True)
def is_traffic_stats_error(self, stats):
return self.traffic_stats.is_traffic_stats_error(stats)
@client_api('getter', True)
def clear_traffic_stats(self, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
return self.traffic_stats.clear_stats(pid_input, is_sum)
@client_api('getter', True)
def get_latency_stats(self,skip_zero =True):
return self.latency_stats.get_stats(skip_zero)
@client_api('command', True)
def start_latency(self, mult = 1, src_ipv4="16.0.0.1", dst_ipv4="48.0.0.1", ports_mask=0x7fffffff, dual_ipv4 = "1.0.0.0"):
if not is_valid_ipv4(src_ipv4):
raise TRexError("src_ipv4 is not a valid IPv4 address: '{0}'".format(src_ipv4))
if not is_valid_ipv4(dst_ipv4):
raise TRexError("dst_ipv4 is not a valid IPv4 address: '{0}'".format(dst_ipv4))
if not is_valid_ipv4(dual_ipv4):
raise TRexError("dual_ipv4 is not a valid IPv4 address: '{0}'".format(dual_ipv4))
params = {
'handler': self.handler,
'mult': mult,
'src_addr': src_ipv4,
'dst_addr': dst_ipv4,
'dual_port_addr': dual_ipv4,
'mask': ports_mask,
}
self.ctx.logger.pre_cmd('Starting latency traffic.')
rc = self._transmit("start_latency", params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def stop_latency(self):
params = {
'handler': self.handler
}
self.ctx.logger.pre_cmd('Stopping latency traffic.')
rc = self._transmit("stop_latency", params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def update_latency(self, mult = 1):
params = {
'handler': self.handler,
'mult': mult,
}
self.ctx.logger.pre_cmd('Updating latency rate.')
rc = self._transmit("update_latency", params = params)
self.ctx.logger.post_cmd(rc)
if not rc:
raise TRexError(rc.err())
@client_api('command', True)
def topo_load(self, topology, tunables = {}):
self.topo_mngr.load(topology, **tunables)
print('')
@client_api('command', True)
def topo_clear(self):
self.topo_mngr.clear()
@client_api('command', True)
def topo_resolve(self, ports = None):
self.topo_mngr.resolve(ports)
@client_api('command', False)
def topo_show(self, ports = None):
self.topo_mngr.show(ports)
print('')
@client_api('command', False)
def topo_save(self, filename):
if os.path.exists(filename):
if os.path.islink(filename) or not os.path.isfile(filename):
raise TRexError("Given path exists and it's not a file!")
sys.stdout.write('\nFilename %s already exists, overwrite? (y/N) ' % filename)
ans = user_input().strip()
if ans.lower() not in ('y', 'yes'):
print('Not saving.')
return
try:
if filename.endswith('.json'):
self.ctx.logger.pre_cmd('Saving topology to JSON: %s' % filename)
code = self.topo_mngr.to_json(False)
elif filename.endswith('.yaml'):
self.ctx.logger.pre_cmd('Saving topology to YAML: %s' % filename)
code = self.topo_mngr.to_yaml()
elif filename.endswith('.py'):
self.ctx.logger.pre_cmd('Saving topology to Python script: %s' % filename)
code = self.topo_mngr.to_code()
else:
self.ctx.logger.error('Saved filename should be .py or .json or .yaml')
return
with open(filename, 'w') as f:
f.write(code)
except Exception as e:
self.ctx.logger.post_cmd(False)
raise TRexError('Saving file failed: %s' % e)
self.ctx.logger.post_cmd(True)
# private function to form json data for GTP tunnel
def _update_gtp_tunnel(self, client_list):
json_attr = []
for key, value in client_list.items():
json_attr.append({'client_ip' : key, 'sip': value.sip, 'dip' : value.dip, 'teid' : value.teid, "version" :value.version})
return json_attr
# execute 'method' for inserting/updateing tunnel info for clients
def update_tunnel_client_record (self, client_list, tunnel_type):
json_attr = []
if tunnel_type == TunnelType.GTP:
json_attr = self._update_gtp_tunnel(client_list)
else:
raise TRexError('Invalid Tunnel Type: %d' % tunnel_type)
params = {"tunnel_type": tunnel_type,
"attr": json_attr }
return self._transmit("update_tunnel_client", params)
# execute 'method' for Making a client active/inactive
def set_client_enable(self, client_list, is_enable):
json_attr = []
for key in client_list:
json_attr.append({'client_ip' : key})
params = {"is_enable": is_enable,
"is_range": False,
"attr": json_attr }
return self._transmit("enable_disable_client", params)
# execute 'method' for Making a client active/inactive
def set_client_enable_range(self, client_start, client_end, is_enable):
json_attr = []
json_attr.append({'client_start_ip' : client_start, 'client_end_ip' : client_end})
params = {"is_enable": is_enable,
"is_range": True,
"attr": json_attr }
return self._transmit("enable_disable_client", params)
# execute 'method' for getting clients stats
def get_clients_info (self, client_list):
json_attr = []
for key in client_list:
json_attr.append({'client_ip' : key})
params = {"is_range": False,
"attr": json_attr }
return self._transmit("get_clients_info", params)
# execute 'method' for getting clients stats
def get_clients_info_range (self, client_start, client_end):
json_attr = []
json_attr.append({'client_start_ip' : client_start, 'client_end_ip' : client_end})
params = {"is_range": True,
"attr": json_attr }
return self._transmit("get_clients_info", params)
############################ console #############################
############################ commands #############################
############################ #############################
@console_api('acquire', 'common', True)
def acquire_line (self, line):
# define a parser
parser = parsing_opts.gen_parser(
self,
'acquire',
self.acquire_line.__doc__,
parsing_opts.FORCE)
opts = parser.parse_args(shlex.split(line))
self.acquire(force = opts.force)
return True
@console_api('reset', 'common', True)
def reset_line(self, line):
parser = parsing_opts.gen_parser(
self,
'reset',
self.reset_line.__doc__,
parsing_opts.PORT_RESTART
)
opts = parser.parse_args(shlex.split(line))
self.reset(restart = opts.restart)
return True
@console_api('start', 'ASTF', True)
def start_line(self, line):
# parse tunables with the previous form. (-t var1=x1,var2=x2..)
def parse_tunables_old_version(tunables_parameters):
parser = parsing_opts.gen_parser(self,
"start",
self.start_line.__doc__,
parsing_opts.TUNABLES)
args = parser.parse_args(tunables_parameters.split())
return args.tunables
# parser for parsing the start command arguments
parser = parsing_opts.gen_parser(self,
'start',
self.start_line.__doc__,
parsing_opts.FILE_PATH,
parsing_opts.MULTIPLIER_NUM,
parsing_opts.DURATION,
parsing_opts.ARGPARSE_TUNABLES,
parsing_opts.ASTF_NC,
parsing_opts.ASTF_LATENCY,
parsing_opts.ASTF_IPV6,
parsing_opts.ASTF_CLIENT_CTRL,
parsing_opts.ASTF_PROFILE_LIST
)
opts = parser.parse_args(shlex.split(line))
help_flags = ('-h', '--help')
# if the user chose to pass the tunables arguments in previous version (-t var1=x1,var2=x2..)
# we decode the tunables and then convert the output from dictionary to list in order to have the same format with the
# newer version.
tunable_dict = {}
if "-t" in line and '=' in line:
tunable_parameter = "-t " + line.split("-t")[1].strip("-h").strip("--help").strip()
tunable_dict = parse_tunables_old_version(tunable_parameter)
tunable_list = []
# converting from tunables dictionary to list
for tunable_key in tunable_dict:
tunable_list.extend(["--{}".format(tunable_key), str(tunable_dict[tunable_key])])
if any(h in opts.tunables for h in help_flags):
tunable_list.append("--help")
opts.tunables = tunable_list
tunable_dict["tunables"] = opts.tunables
valid_pids = self.validate_profile_id_input(opts.profiles, start = True)
for profile_id in valid_pids:
self.load_profile(opts.file[0], tunable_dict, pid_input = profile_id)
#when ".. -t --help", is called the help message is being printed once and then it returns to the console
if any(h in opts.tunables for h in help_flags):
break
kw = {}
if opts.clients:
for client in opts.clients:
if client not in self.ports:
raise TRexError('Invalid client interface: %d' % client)
if client & 1:
raise TRexError('Following interface is not client: %d' % client)
kw['client_mask'] = self._calc_port_mask(opts.clients)
elif opts.servers_only:
kw['client_mask'] = 0
self.start(opts.mult, opts.duration, opts.nc, False, opts.latency_pps, opts.ipv6, pid_input = profile_id, **kw)
return True
@console_api('stop', 'ASTF', True)
def stop_line(self, line):
parser = parsing_opts.gen_parser(
self,
'stop',
self.stop_line.__doc__,
parsing_opts.ASTF_PROFILE_DEFAULT_LIST,
parsing_opts.REMOVE
)
opts = parser.parse_args(shlex.split(line))
self.stop(False, pid_input = opts.profiles, is_remove = opts.remove)
@console_api('update', 'ASTF', True)
def update_line(self, line):
parser = parsing_opts.gen_parser(
self,
'update',
self.update_line.__doc__,
parsing_opts.MULTIPLIER_NUM,
parsing_opts.ASTF_PROFILE_DEFAULT_LIST
)
opts = parser.parse_args(shlex.split(line))
self.update(opts.mult, pid_input = opts.profiles)
@console_api('service', 'ASTF', True)
def service_line (self, line):
parser = parsing_opts.gen_parser(self,
"service",
self.service_line.__doc__,
parsing_opts.SERVICE_GROUP)
opts = parser.parse_args(line.split())
enabled, filtered, mask = self._get_service_params(opts)
if mask is not None and ((mask & NO_TCP_UDP_MASK) == 0):
raise TRexError('Cannot set NO_TCP_UDP off in ASTF!')
self.set_service_mode(enabled = enabled, filtered = filtered, mask = mask)
return True
@staticmethod
def _calc_port_mask(ports):
mask =0
for p in ports:
mask += (1<<p)
return mask
    @console_api('latency', 'ASTF', True)
    def latency_line(self, line):
        # Console handler for 'latency'; each action is a sub-command wired below.
        parser = parsing_opts.gen_parser(
            self,
            'latency',
            self.latency_line.__doc__)
        def latency_add_parsers(subparsers, cmd, help = '', **k):
            # Helper so every sub-command gets matching description/help text.
            return subparsers.add_parser(cmd, description = help, help = help, **k)
        subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
        start_parser = latency_add_parsers(subparsers, 'start', help = 'Start latency traffic')
        latency_add_parsers(subparsers, 'stop', help = 'Stop latency traffic')
        update_parser = latency_add_parsers(subparsers, 'update', help = 'Update rate of running latency')
        latency_add_parsers(subparsers, 'show', help = 'alias for stats -l')
        latency_add_parsers(subparsers, 'hist', help = 'alias for stats --lh')
        latency_add_parsers(subparsers, 'counters', help = 'alias for stats --lc')
        # Only 'start' and 'update' take extra arguments.
        start_parser.add_arg_list(
            parsing_opts.MULTIPLIER_NUM,
            parsing_opts.SRC_IPV4,
            parsing_opts.DST_IPV4,
            parsing_opts.PORT_LIST,
            parsing_opts.DUAL_IPV4
        )
        update_parser.add_arg_list(
            parsing_opts.MULTIPLIER_NUM,
        )
        opts = parser.parse_args(shlex.split(line))
        # Dispatch on the sub-command; a bare 'latency' falls through to 'show'.
        if opts.command == 'start':
            ports_mask = self._calc_port_mask(opts.ports)
            self.start_latency(opts.mult, opts.src_ipv4, opts.dst_ipv4, ports_mask, opts.dual_ip)
        elif opts.command == 'stop':
            self.stop_latency()
        elif opts.command == 'update':
            self.update_latency(mult = opts.mult)
        elif opts.command == 'show' or not opts.command:
            self._show_latency_stats()
        elif opts.command == 'hist':
            self._show_latency_histogram()
        elif opts.command == 'counters':
            self._show_latency_counters()
        else:
            raise TRexError('Unhandled command %s' % opts.command)
        return True
    @console_api('topo', 'ASTF', True, True)
    def topo_line(self, line):
        # Console handler for 'topo': load/resolve/show/clear/save the topology.
        parser = parsing_opts.gen_parser(
            self,
            'topo',
            self.topo_line.__doc__)
        def topology_add_parsers(subparsers, cmd, help = '', **k):
            # Helper so every sub-command gets matching description/help text.
            return subparsers.add_parser(cmd, description = help, help = help, **k)
        subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
        load_parser = topology_add_parsers(subparsers, 'load', help = 'Load topology from file')
        reso_parser = topology_add_parsers(subparsers, 'resolve', help = 'Resolve loaded topology, push to server on success')
        show_parser = topology_add_parsers(subparsers, 'show', help = 'Show current topology status')
        topology_add_parsers(subparsers, 'clear', help = 'Clear current topology')
        save_parser = topology_add_parsers(subparsers, 'save', help = 'Save topology to file')
        load_parser.add_arg_list(
            parsing_opts.FILE_PATH,
            parsing_opts.TUNABLES,
        )
        reso_parser.add_arg_list(
            parsing_opts.PORT_LIST_NO_DEFAULT,
        )
        show_parser.add_arg_list(
            parsing_opts.PORT_LIST_NO_DEFAULT,
        )
        save_parser.add_arg_list(
            parsing_opts.FILE_PATH_NO_CHECK,
        )
        opts = parser.parse_args(shlex.split(line))
        # NOTE(review): 'load' and 'show' return False while the other commands
        # return True -- presumably to suppress the console's success feedback
        # for commands that print their own output; confirm against the caller.
        if opts.command == 'load':
            self.topo_load(opts.file[0], opts.tunables)
            return False
        elif opts.command == 'resolve':
            self.topo_resolve(opts.ports_no_default)
        elif opts.command == 'show' or not opts.command:
            self.topo_show(opts.ports_no_default)
            return False
        elif opts.command == 'clear':
            self.topo_clear()
        elif opts.command == 'save':
            self.topo_save(opts.file[0])
        else:
            raise TRexError('Unhandled command %s' % opts.command)
        return True
@console_api('clear', 'common', False)
def clear_stats_line (self, line):
# define a parser
parser = parsing_opts.gen_parser(self,
"clear",
self.clear_stats_line.__doc__,
parsing_opts.PORT_LIST_WITH_ALL)
opts = parser.parse_args(line.split())
self.clear_stats(opts.ports, pid_input = ALL_PROFILE_ID)
return RC_OK()
@console_api('stats', 'common', True)
def show_stats_line (self, line):
# define a parser
parser = parsing_opts.gen_parser(
self,
'stats',
self.show_stats_line.__doc__,
parsing_opts.PORT_LIST,
parsing_opts.ASTF_STATS_GROUP,
parsing_opts.ASTF_PROFILE_STATS)
astf_profiles_state = self.get_profiles_state()
valid_pids = list(astf_profiles_state.keys())
opts = parser.parse_args(shlex.split(line))
if not opts:
return
# without parameters show only global and ports
if not opts.stats:
self._show_global_stats()
self._show_port_stats(opts.ports)
return
if self.is_dynamic == True and opts.pfname == None:
is_sum = True
valid_pids = self.validate_profile_id_input(pid_input = DEFAULT_PROFILE_ID)
else:
is_sum = False
valid_pids = self.validate_profile_id_input(pid_input = opts.pfname)
# decode which stats to show
if opts.stats == 'global':
self._show_global_stats()
elif opts.stats == 'ports':
self._show_port_stats(opts.ports)
elif opts.stats == 'xstats':
self._show_port_xstats(opts.ports, False)
elif opts.stats == 'xstats_inc_zero':
self._show_port_xstats(opts.ports, True)
elif opts.stats == 'status':
self._show_port_status(opts.ports)
elif opts.stats == 'cpu':
self._show_cpu_util()
elif opts.stats == 'mbuf':
self._show_mbuf_util()
elif opts.stats == 'astf':
for profile_id in valid_pids:
self._show_traffic_stats(False, pid_input = profile_id, is_sum = is_sum)
elif opts.stats == 'astf_inc_zero':
for profile_id in valid_pids:
self._show_traffic_stats(True, pid_input = profile_id, is_sum = is_sum)
elif opts.stats == 'latency':
self._show_latency_stats()
elif opts.stats == 'latency_histogram':
self._show_latency_histogram()
elif opts.stats == 'latency_counters':
self._show_latency_counters()
else:
raise TRexError('Unhandled stat: %s' % opts.stats)
    @console_api('template_group', 'ASTF', True)
    def template_group_line(self, line):
        # Console handler for 'template_group': list template-group names or
        # show per-group traffic statistics for the selected profile(s).
        parser = parsing_opts.gen_parser(
            self,
            'template_group',
            self.template_group_line.__doc__
        )
        def template_group_add_parsers(subparsers, cmd, help = '', **k):
            # Helper so every sub-command gets matching description/help text.
            return subparsers.add_parser(cmd, description = help, help = help, **k)
        subparsers = parser.add_subparsers(title = 'commands', dest = 'command', metavar = '')
        names_parser = template_group_add_parsers(subparsers, 'names', help = 'Get template group names')
        stats_parser = template_group_add_parsers(subparsers, 'stats', help = 'Get stats for template group')
        names_parser.add_arg_list(parsing_opts.TG_NAME_START)
        names_parser.add_arg_list(parsing_opts.TG_NAME_AMOUNT)
        names_parser.add_arg_list(parsing_opts.ASTF_PROFILE_LIST)
        stats_parser.add_arg_list(parsing_opts.TG_STATS)
        stats_parser.add_arg_list(parsing_opts.ASTF_PROFILE_LIST)
        opts = parser.parse_args(shlex.split(line))
        if not opts:
            return
        pid_input = opts.profiles
        valid_pids = self.validate_profile_id_input(pid_input)
        for profile_id in valid_pids:
            if opts.command == 'names':
                self.traffic_stats._show_tg_names(start=opts.start, amount=opts.amount, pid_input = profile_id)
            elif opts.command == 'stats':
                try:
                    # Refresh the group names first so name->id translation is current.
                    self.get_tg_names(profile_id)
                    tgid = self.traffic_stats._translate_names_to_ids(opts.name, pid_input = profile_id)
                    self._show_traffic_stats(include_zero_lines=False, tgid = tgid[0], pid_input = profile_id)
                except ASTFErrorBadTG:
                    print(format_text("Template group name %s doesn't exist!" % opts.name, 'bold'))
            else:
                raise TRexError('Unhandled command: %s' % opts.command)
    def _get_num_of_tgids(self, pid_input = DEFAULT_PROFILE_ID):
        # Thin delegate: number of template groups known for the given profile.
        return self.traffic_stats._get_num_of_tgids(pid_input)
def _show_traffic_stats(self, include_zero_lines, buffer = sys.stdout, tgid = 0, pid_input = DEFAULT_PROFILE_ID, is_sum = False):
table = self.traffic_stats.to_table(include_zero_lines, tgid, pid_input, is_sum = is_sum)
text_tables.print_table_with_header(table, untouched_header = table.title, buffer = buffer)
def _show_latency_stats(self, buffer = sys.stdout):
table = self.latency_stats.to_table_main()
text_tables.print_table_with_header(table, untouched_header = table.title, buffer = buffer)
def _show_latency_histogram(self, buffer = sys.stdout):
table = self.latency_stats.histogram_to_table()
text_tables.print_table_with_header(table, untouched_header = table.title, buffer = buffer)
def _show_latency_counters(self, buffer = sys.stdout):
table = self.latency_stats.counters_to_table()
text_tables.print_table_with_header(table, untouched_header = table.title, buffer = buffer)
def _show_profiles_states(self, buffer = sys.stdout):
table = text_tables.TRexTextTable()
table.set_cols_align(["c"] + ["c"])
table.set_cols_width([20] + [20])
table.header(["ID", "State"])
self.sync()
profiles_state = sorted(self.get_profiles_state().items())
for profile_id, state in profiles_state:
table.add_row([
profile_id,
state
])
return table
    @console_api('profiles', 'ASTF', True, True)
    def profiles_line(self, line):
        # Console handler for 'profiles': print the state of every loaded profile.
        parser = parsing_opts.gen_parser(self,
                                         "profiles",
                                         self.profiles_line.__doc__)
        opts = parser.parse_args(line.split())
        if not opts:
            return opts
        table = self._show_profiles_states()
        # NOTE(review): the table is printed even when it is falsy; presumably
        # print_table_with_header handles an empty table gracefully and the
        # info message just supplements it -- confirm.
        if not table:
            self.logger.info(format_text("No profiles found with desired filter.\n", "bold", "magenta"))
        text_tables.print_table_with_header(table, header = 'Profile states')
| true | true |
f733a38586237d09cb05dc4b8ec5ac633e12d4d7 | 6,724 | py | Python | examples/pwr_run/checkpointing/nonpc_short/final1/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/nonpc_short/final1/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/nonpc_short/final1/job20.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the GPU assigned by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.0005
epoch_begin_time = 0  # wall-clock start of current epoch; 0 = no epoch running yet
job_name = sys.argv[0].split('.')[0]  # script name without extension, used as job id
save_files = '/scratch/li.baol/checkpoint_final1/' + job_name + '*'  # checkpoint glob for this job
total_epochs = 44
starting_epoch = 0
# first step is to update the PID
# Register our PID with the scheduler: 'pid_lock.json' acts as a lock file;
# renaming it to 'pid.json' signals that the update is complete.
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
    pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
    fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    # Checkpoints are named '<job>_<epoch>.h5'; recover the epoch to resume from.
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    # Per-pixel mean over the training set; the same mean is applied to test data.
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    print('resume from checkpoint')
    # Restores architecture, weights, and optimizer state in one call.
    model = keras.models.load_model(save_file)
else:
    print('train from start')
    model = models.Sequential()
    # MobileNetV2 backbone trained from scratch (weights=None -> no pretraining).
    base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0  # updated by the PrintEpoch callback; read by the SIGTERM handler
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: account wasted epoch time, checkpoint the model, exit."""
    # Record how much of the interrupted epoch is lost to this preemption.
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json.dumps(epoch_waste_dict))
    print('checkpointing the model triggered by kill -15 signal')
    # Keep exactly one checkpoint per job: drop stale files, save current epoch.
    for stale_ckpt in glob.glob(save_files):
        os.remove(stale_ckpt)
    model.save('/scratch/li.baol/checkpoint_final1/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # Tell the scheduler that a valid checkpoint now exists for this job.
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    with open('checkpoint.json', 'w') as fp:
        fp.write(json.dumps(checkpoint_dict))
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    # Tracks the running epoch number so the SIGTERM handler can checkpoint
    # with the right name, and records when the epoch started (waste accounting).
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
# NOTE(review): only half of total_epochs is trained in this run -- presumably
# the scheduler relaunches the job for the remainder; confirm.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 30.563636 | 118 | 0.703004 |
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.mobilenet_v2 import MobileNetV2
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
batch_size = 256
args_lr = 0.0005
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final1/' + job_name + '*'
total_epochs = 44
starting_epoch = 0
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
model_type = args.tc
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
base_model = MobileNetV2(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
print(model_type)
current_epoch = 0
| true | true |
f733a3ecee48a2958a6f46d7e9a4ea6651a8b85a | 7,142 | py | Python | DeepFilterNet/df/utils.py | Rikorose/DeepFilterNet | afe6bfb53efae70207e18df7ed372c2cfe337fee | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 54 | 2021-10-13T01:17:11.000Z | 2022-03-24T00:54:01.000Z | DeepFilterNet/df/utils.py | Rikorose/DeepFilterNet | afe6bfb53efae70207e18df7ed372c2cfe337fee | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 33 | 2021-11-04T23:16:12.000Z | 2022-03-24T10:15:34.000Z | DeepFilterNet/df/utils.py | Rikorose/DeepFilterNet | afe6bfb53efae70207e18df7ed372c2cfe337fee | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 16 | 2021-10-15T02:06:52.000Z | 2022-03-24T00:54:04.000Z | import collections
import math
import os
import random
import subprocess
from socket import gethostname
from typing import Any, Dict, Set, Tuple, Union
import numpy as np
import torch
from loguru import logger
from torch import Tensor
from torch._six import string_classes
from torch.autograd import Function
from torch.types import Number
from df.config import config
from df.model import ModelParams
try:
from torchaudio.functional import resample as ta_resample
except ImportError:
from torchaudio.compliance.kaldi import resample_waveform as ta_resample # type: ignore
def get_resample_params(method: str) -> Dict[str, Any]:
    """Return torchaudio resampling kwargs for a named quality preset.

    Args:
        method: One of ``"sinc_fast"``, ``"sinc_best"``, ``"kaiser_fast"``,
            ``"kaiser_best"``.

    Returns:
        Keyword arguments for :func:`torchaudio.functional.resample`.

    Raises:
        ValueError: If *method* is not a known preset.
    """
    params = {
        "sinc_fast": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 16},
        "sinc_best": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 64},
        "kaiser_fast": {
            "resampling_method": "kaiser_window",
            "lowpass_filter_width": 16,
            "rolloff": 0.85,
            "beta": 8.555504641634386,
        },
        "kaiser_best": {
            "resampling_method": "kaiser_window",
            "lowpass_filter_width": 16,
            "rolloff": 0.9475937167399596,
            "beta": 14.769656459379492,
        },
    }
    # Validate with a real exception (an assert would vanish under `python -O`).
    if method not in params:
        raise ValueError(f"method must be one of {list(params)}, got {method!r}")
    return params[method]
def resample(audio: Tensor, orig_sr: int, new_sr: int, method="sinc_fast"):
    """Resample *audio* from *orig_sr* to *new_sr* Hz using a named quality preset."""
    kwargs = get_resample_params(method)
    return ta_resample(audio, orig_sr, new_sr, **kwargs)
def get_device():
    """Return the torch device configured via [train] DEVICE, else cuda:0/cpu."""
    configured = config("DEVICE", default="", section="train")
    if configured:
        return torch.device(configured)
    # No explicit setting: prefer the first GPU when one is available.
    return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def as_complex(x: Tensor):
    """View a real tensor whose trailing dim holds (re, im) pairs as complex.

    Complex inputs pass through unchanged; raises ``ValueError`` when the
    last dimension does not have length 2.
    """
    if torch.is_complex(x):
        return x
    if x.shape[-1] != 2:
        raise ValueError(f"Last dimension need to be of length 2 (re + im), but got {x.shape}")
    # view_as_complex requires the last dimension to be stride-1 contiguous.
    return torch.view_as_complex(x if x.stride(-1) == 1 else x.contiguous())
def as_real(x: Tensor):
    """Inverse of :func:`as_complex`: complex tensors gain a trailing (re, im) dim."""
    return torch.view_as_real(x) if torch.is_complex(x) else x
class angle_re_im(Function):
    """Similar to torch.angle but robustify the gradient for zero magnitude."""
    @staticmethod
    def forward(ctx, re: Tensor, im: Tensor):
        # Phase of the complex number re + j*im; inputs saved for backward.
        ctx.save_for_backward(re, im)
        return torch.atan2(im, re)
    @staticmethod
    def backward(ctx, grad: Tensor) -> Tuple[Tensor, Tensor]:
        re, im = ctx.saved_tensors
        # d(atan2)/d(re) = -im / (re^2 + im^2), d(atan2)/d(im) = re / (re^2 + im^2);
        # clamping the squared magnitude avoids inf/nan gradients at the origin.
        grad_inv = grad / (re.square() + im.square()).clamp_min_(1e-10)
        return -im * grad_inv, re * grad_inv
class angle(Function):
    """Similar to torch.angle but robustify the gradient for zero magnitude."""
    @staticmethod
    def forward(ctx, x: Tensor):
        # x is a complex tensor; its phase is atan2(imag, real).
        ctx.save_for_backward(x)
        return torch.atan2(x.imag, x.real)
    @staticmethod
    def backward(ctx, grad: Tensor):
        (x,) = ctx.saved_tensors
        # Same robustified atan2 gradient as angle_re_im, repacked as a
        # complex tensor; the clamp avoids inf/nan at zero magnitude.
        grad_inv = grad / (x.real.square() + x.imag.square()).clamp_min_(1e-10)
        return torch.view_as_complex(torch.stack((-x.imag * grad_inv, x.real * grad_inv), dim=-1))
def check_finite_module(obj, name="Module", _raise=True) -> Set[str]:
    """Recursively collect the names of non-finite parameters/buffers of *obj*.

    Args:
        obj: A ``torch.nn.Module`` (recursed into) or a ``Tensor`` (checked
            for NaN/inf directly).  Other objects yield an empty set.
        name: Label used for *obj* in results and in the raised message.
        _raise: When True, raise ``ValueError`` if anything non-finite was found.

    Returns:
        The set of offending names (empty when everything is finite).

    Fixes two defects of the previous version: the ``name`` parameter was
    shadowed by the loop variables (so the error message reported the wrong
    name), and no branch ever inspected a tensor, so nothing could ever be
    detected despite the function's stated purpose.
    """
    out: Set[str] = set()
    if isinstance(obj, torch.nn.Module):
        # Aggregate from children first, then this module's own params/buffers;
        # recursive calls defer raising so the top-level call reports everything.
        for child_name, child in obj.named_children():
            out |= check_finite_module(child, child_name, _raise=False)
        for param_name, param in obj.named_parameters():
            out |= check_finite_module(param, param_name, _raise=False)
        for buf_name, buf in obj.named_buffers():
            out |= check_finite_module(buf, buf_name, _raise=False)
    elif isinstance(obj, Tensor):
        # Parameters are Tensor subclasses, so this covers params and buffers.
        if not torch.isfinite(obj).all():
            out.add(name)
    if _raise and len(out) > 0:
        raise ValueError(f"{name} not finite during checkpoint writing including: {out}")
    return out
def make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:
    """Coerce a torch tensor, numpy array, or scalar into a numpy array.

    Args:
        x: torch tensor, numpy array, or python/numpy scalar.

    Returns:
        numpy.ndarray: the converted value (scalars become 1-element arrays).

    Raises:
        NotImplementedError: for any other input type.
    """
    if isinstance(x, Tensor):
        return x.detach().cpu().numpy()
    if isinstance(x, np.ndarray):
        return x
    if np.isscalar(x):
        return np.array([x])
    raise NotImplementedError(
        "Got {}, but numpy array, scalar, or torch tensor are expected.".format(type(x))
    )
def get_norm_alpha(log: bool = True) -> float:
    # Exponential-average coefficient for normalization, derived from the
    # model's sample rate, hop size and time constant.
    p = ModelParams()
    a_ = _calculate_norm_alpha(sr=p.sr, hop_size=p.hop_size, tau=p.norm_tau)
    # Round to the fewest decimals that still keep alpha strictly below 1
    # (alpha == 1.0 would disable the exponential decay entirely).
    precision = 3
    a = 1.0
    while a >= 1.0:
        a = round(a_, precision)
        precision += 1
    if log:
        logger.info(f"Running with normalization window alpha = '{a}'")
    return a
def _calculate_norm_alpha(sr: int, hop_size: int, tau: float):
    """Per-hop exponential decay coefficient exp(-dt/tau) for a decay window tau [s]."""
    frame_period = hop_size / sr  # seconds advanced per STFT hop
    return math.exp(-frame_period / tau)
def check_manual_seed(seed: "int | None" = None):
    """Seed the python, numpy and torch RNGs.

    Args:
        seed: Seed to apply; when None, a random seed in [1, 10000] is drawn.

    Returns:
        The seed that was actually applied, so the caller can log/reproduce it.
    """
    # `is None` (not truthiness) so an explicit seed of 0 is honored; the
    # previous `seed or randint(...)` silently replaced 0 with a random seed.
    if seed is None:
        seed = random.randint(1, 10000)
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    return seed
def get_git_root():
    """Absolute path of the git repository that contains this file."""
    here = os.path.dirname(os.path.abspath(__file__))
    cmd = ["git", "-C", here, "rev-parse", "--show-toplevel"]
    return subprocess.check_output(cmd).strip().decode()
def get_commit_hash():
    """Short hash of the current git commit, or None outside a git checkout."""
    try:
        cmd = ["git", "-C", get_git_root(), "rev-parse", "--short", "--verify", "HEAD"]
        return subprocess.check_output(cmd).strip().decode()
    except subprocess.CalledProcessError:
        # probably not in git repo
        return None
def get_host() -> str:
    """Hostname of the machine running this process."""
    return gethostname()
def get_branch_name():
    """Name of the checked-out git branch, or None outside a git checkout."""
    try:
        here = os.path.dirname(os.path.abspath(__file__))
        cmd = ["git", "-C", here, "rev-parse", "--abbrev-ref", "HEAD"]
        return subprocess.check_output(cmd).strip().decode()
    except subprocess.CalledProcessError:
        # probably not in git repo
        return None
# from pytorch/ignite:
def apply_to_tensor(input_, func):
    """Apply *func* to every tensor in a module/tensor/mapping/sequence.

    Containers are rebuilt (mappings -> dict, iterables -> list), tensors are
    mapped through *func*, and strings plus any other objects pass through
    unchanged.
    """
    # collections.Mapping/Iterable aliases were removed in Python 3.10; the
    # ABCs live in collections.abc.  Imported locally to avoid relying on the
    # deprecated module-level aliases.
    from collections.abc import Iterable, Mapping
    if isinstance(input_, torch.nn.Module):
        return [apply_to_tensor(c, func) for c in input_.children()]
    elif isinstance(input_, torch.nn.Parameter):
        return func(input_.data)
    elif isinstance(input_, Tensor):
        return func(input_)
    elif isinstance(input_, str):
        # str replaces the deprecated torch._six.string_classes (== (str,));
        # strings are iterable, so this must precede the Iterable branch.
        return input_
    elif isinstance(input_, Mapping):
        return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}
    elif isinstance(input_, Iterable):
        return [apply_to_tensor(sample, func) for sample in input_]
    else:
        # None and any other opaque object pass through untouched.
        return input_
def detach_hidden(hidden: Any) -> Any:
    """Cut the backpropagation graph.

    Auxiliary function that detaches every tensor found in *hidden* (e.g. a
    possibly nested RNN hidden state) so gradients do not flow through it.
    """
    return apply_to_tensor(hidden, Tensor.detach)
| 30.917749 | 98 | 0.646458 | import collections
import math
import os
import random
import subprocess
from socket import gethostname
from typing import Any, Dict, Set, Tuple, Union
import numpy as np
import torch
from loguru import logger
from torch import Tensor
from torch._six import string_classes
from torch.autograd import Function
from torch.types import Number
from df.config import config
from df.model import ModelParams
try:
from torchaudio.functional import resample as ta_resample
except ImportError:
from torchaudio.compliance.kaldi import resample_waveform as ta_resample
def get_resample_params(method: str) -> Dict[str, Any]:
params = {
"sinc_fast": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 16},
"sinc_best": {"resampling_method": "sinc_interpolation", "lowpass_filter_width": 64},
"kaiser_fast": {
"resampling_method": "kaiser_window",
"lowpass_filter_width": 16,
"rolloff": 0.85,
"beta": 8.555504641634386,
},
"kaiser_best": {
"resampling_method": "kaiser_window",
"lowpass_filter_width": 16,
"rolloff": 0.9475937167399596,
"beta": 14.769656459379492,
},
}
assert method in params.keys(), f"method must be one of {list(params.keys())}"
return params[method]
def resample(audio: Tensor, orig_sr: int, new_sr: int, method="sinc_fast"):
params = get_resample_params(method)
return ta_resample(audio, orig_sr, new_sr, **params)
def get_device():
s = config("DEVICE", default="", section="train")
if s == "":
if torch.cuda.is_available():
DEVICE = torch.device("cuda:0")
else:
DEVICE = torch.device("cpu")
else:
DEVICE = torch.device(s)
return DEVICE
def as_complex(x: Tensor):
if torch.is_complex(x):
return x
if x.shape[-1] != 2:
raise ValueError(f"Last dimension need to be of length 2 (re + im), but got {x.shape}")
if x.stride(-1) != 1:
x = x.contiguous()
return torch.view_as_complex(x)
def as_real(x: Tensor):
if torch.is_complex(x):
return torch.view_as_real(x)
return x
class angle_re_im(Function):
@staticmethod
def forward(ctx, re: Tensor, im: Tensor):
ctx.save_for_backward(re, im)
return torch.atan2(im, re)
@staticmethod
def backward(ctx, grad: Tensor) -> Tuple[Tensor, Tensor]:
re, im = ctx.saved_tensors
grad_inv = grad / (re.square() + im.square()).clamp_min_(1e-10)
return -im * grad_inv, re * grad_inv
class angle(Function):
@staticmethod
def forward(ctx, x: Tensor):
ctx.save_for_backward(x)
return torch.atan2(x.imag, x.real)
@staticmethod
def backward(ctx, grad: Tensor):
(x,) = ctx.saved_tensors
grad_inv = grad / (x.real.square() + x.imag.square()).clamp_min_(1e-10)
return torch.view_as_complex(torch.stack((-x.imag * grad_inv, x.real * grad_inv), dim=-1))
def check_finite_module(obj, name="Module", _raise=True) -> Set[str]:
out: Set[str] = set()
if isinstance(obj, torch.nn.Module):
for name, child in obj.named_children():
out = out | check_finite_module(child, name)
for name, param in obj.named_parameters():
out = out | check_finite_module(param, name)
for name, buf in obj.named_buffers():
out = out | check_finite_module(buf, name)
if _raise and len(out) > 0:
raise ValueError(f"{name} not finite during checkpoint writing including: {out}")
return out
def make_np(x: Union[Tensor, np.ndarray, Number]) -> np.ndarray:
if isinstance(x, np.ndarray):
return x
if np.isscalar(x):
return np.array([x])
if isinstance(x, Tensor):
return x.detach().cpu().numpy()
raise NotImplementedError(
"Got {}, but numpy array, scalar, or torch tensor are expected.".format(type(x))
)
def get_norm_alpha(log: bool = True) -> float:
p = ModelParams()
a_ = _calculate_norm_alpha(sr=p.sr, hop_size=p.hop_size, tau=p.norm_tau)
precision = 3
a = 1.0
while a >= 1.0:
a = round(a_, precision)
precision += 1
if log:
logger.info(f"Running with normalization window alpha = '{a}'")
return a
def _calculate_norm_alpha(sr: int, hop_size: int, tau: float):
dt = hop_size / sr
return math.exp(-dt / tau)
def check_manual_seed(seed: int = None):
seed = seed or random.randint(1, 10000)
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
return seed
def get_git_root():
git_local_dir = os.path.dirname(os.path.abspath(__file__))
args = ["git", "-C", git_local_dir, "rev-parse", "--show-toplevel"]
return subprocess.check_output(args).strip().decode()
def get_commit_hash():
try:
git_dir = get_git_root()
args = ["git", "-C", git_dir, "rev-parse", "--short", "--verify", "HEAD"]
commit = subprocess.check_output(args).strip().decode()
except subprocess.CalledProcessError:
commit = None
return commit
def get_host() -> str:
return gethostname()
def get_branch_name():
try:
git_dir = os.path.dirname(os.path.abspath(__file__))
args = ["git", "-C", git_dir, "rev-parse", "--abbrev-ref", "HEAD"]
branch = subprocess.check_output(args).strip().decode()
except subprocess.CalledProcessError:
branch = None
return branch
def apply_to_tensor(input_, func):
if isinstance(input_, torch.nn.Module):
return [apply_to_tensor(c, func) for c in input_.children()]
elif isinstance(input_, torch.nn.Parameter):
return func(input_.data)
elif isinstance(input_, Tensor):
return func(input_)
elif isinstance(input_, string_classes):
return input_
elif isinstance(input_, collections.Mapping):
return {k: apply_to_tensor(sample, func) for k, sample in input_.items()}
elif isinstance(input_, collections.Iterable):
return [apply_to_tensor(sample, func) for sample in input_]
elif input_ is None:
return input_
else:
return input_
def detach_hidden(hidden: Any) -> Any:
return apply_to_tensor(hidden, Tensor.detach)
| true | true |
f733a40bf98049f8734ae87f832ac02da58c2d79 | 1,471 | py | Python | backfill_alerting/delphi_backfill_alerting/config.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | backfill_alerting/delphi_backfill_alerting/config.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | backfill_alerting/delphi_backfill_alerting/config.py | jingjtang/covidcast-indicators | 34cb8786f78fbea2710b810a9500ee02c2379241 | [
"MIT"
] | null | null | null | """
This file contains configuration variables used for the backfill alerting.
"""
from datetime import datetime, timedelta
class Config:
    """Static configuration variables."""
    ## dates
    FIRST_DATA_DATE = datetime(2020, 1, 1)  # earliest date covered by the data
    # shift dates forward for labeling purposes
    DAY_SHIFT = timedelta(days=1)
    ## data columns
    COVID_COUNT = "Covid"
    TOTAL_COUNT = "Denom"
    COUNT_COL = "count"
    DATE_COL = "time_value"
    GEO_COL = "geo_value"
    ID_COLS = [DATE_COL] + [GEO_COL]
    DATA_COLS = [DATE_COL, GEO_COL, COUNT_COL]
    DATA_DTYPES = {DATE_COL: str, COUNT_COL: str, GEO_COL: str}
    COUNT_TYPES = [COVID_COUNT, TOTAL_COUNT]
    ## file path
    # three %s slots -- presumably directory, count type, and a date stamp;
    # confirm against the code that formats this template.
    FILE_PATH = "%s/%s_Counts_Products_%s.dat.gz"
    ## GEO RELATED
    COUNTY_LEVEL = "fips"
    STATE_LEVEL = "state_id"
    GEO_LEVELS = [COUNTY_LEVEL, STATE_LEVEL]
    # Backfill Variables
    CHANGE_RATE = "cr"
    BACKFILL_FRACTION = "frc"
    BACKFILL_VARS = [CHANGE_RATE, BACKFILL_FRACTION]
    # reference lags (in days) evaluated for each backfill variable
    BACKFILL_REF_LAG = {CHANGE_RATE: [1, 7],
                        BACKFILL_FRACTION: [60]}
    # Training variable
    # lag buckets: every day from -1 through 14, then sparse checkpoints
    LAG_SPLITS = list(range(-1, 15)) + [28, 42, 60]
    # For Alerting Messages
    # display names keyed by (backfill variable, reference lag)
    bv_names = {("cr", 7): "7-day change rate",
                ("cr", 1): "Daily change rate",
                ("frc", 60): "Backfill Fraction (anchor=60)"}
    count_names = {"Covid": "COVID counts", "Denom": "Total counts"}
    geo_names = {"fips": "county", "state_id": "state"}
| 27.754717 | 74 | 0.627464 |
from datetime import datetime, timedelta
class Config:
    """Static configuration variables for the backfill alerting pipeline.

    BUG FIX: several attribute names in this copy were truncated
    (``RST_DATA_DATE``, ``NT``, ``ATH``, ``EVEL``), which left
    ``COVID_COUNT`` and ``COUNTY_LEVEL`` undefined and made the class body
    raise NameError in ``COUNT_TYPES`` / ``GEO_LEVELS``.  The canonical
    names are restored here.
    """
    # dates
    FIRST_DATA_DATE = datetime(2020, 1, 1)
    # shift dates forward by one day for labeling purposes
    DAY_SHIFT = timedelta(days=1)
    # data columns
    COVID_COUNT = "Covid"
    TOTAL_COUNT = "Denom"
    COUNT_COL = "count"
    DATE_COL = "time_value"
    GEO_COL = "geo_value"
    ID_COLS = [DATE_COL] + [GEO_COL]
    DATA_COLS = [DATE_COL, GEO_COL, COUNT_COL]
    DATA_DTYPES = {DATE_COL: str, COUNT_COL: str, GEO_COL: str}
    COUNT_TYPES = [COVID_COUNT, TOTAL_COUNT]
    # file path template: (directory, count type, date)
    FILE_PATH = "%s/%s_Counts_Products_%s.dat.gz"
    # geographic levels
    COUNTY_LEVEL = "fips"
    STATE_LEVEL = "state_id"
    GEO_LEVELS = [COUNTY_LEVEL, STATE_LEVEL]
    # backfill variables
    CHANGE_RATE = "cr"
    BACKFILL_FRACTION = "frc"
    BACKFILL_VARS = [CHANGE_RATE, BACKFILL_FRACTION]
    BACKFILL_REF_LAG = {CHANGE_RATE: [1, 7],
                        BACKFILL_FRACTION: [60]}
    # training variable
    LAG_SPLITS = list(range(-1, 15)) + [28, 42, 60]
    # human-readable names for alerting messages
    bv_names = {("cr", 7): "7-day change rate",
                ("cr", 1): "Daily change rate",
                ("frc", 60): "Backfill Fraction (anchor=60)"}
    count_names = {"Covid": "COVID counts", "Denom": "Total counts"}
    geo_names = {"fips": "county", "state_id": "state"}
| true | true |
f733a4997545f5675b7d476938d494363e9bac81 | 2,379 | py | Python | gilda/resources/__init__.py | steppi/gilda | 4927469e5f9a4ca20a056f617c126fe6a4bf3b34 | [
"BSD-2-Clause"
] | null | null | null | gilda/resources/__init__.py | steppi/gilda | 4927469e5f9a4ca20a056f617c126fe6a4bf3b34 | [
"BSD-2-Clause"
] | null | null | null | gilda/resources/__init__.py | steppi/gilda | 4927469e5f9a4ca20a056f617c126fe6a4bf3b34 | [
"BSD-2-Clause"
] | null | null | null | import os
import boto3
import pystow
import logging
import botocore
from gilda import __version__
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
MESH_MAPPINGS_PATH = os.path.join(HERE, 'mesh_mappings.tsv')
resource_dir = pystow.join('gilda', __version__)
GROUNDING_TERMS_BASE_NAME = 'grounding_terms.tsv'
GROUNDING_TERMS_PATH = os.path.join(resource_dir, GROUNDING_TERMS_BASE_NAME)
# Popular organisms per UniProt, see
# https://www.uniprot.org/help/filter_options
popular_organisms = ['9606', '10090', '10116', '9913', '7955', '7227',
'6239', '44689', '3702', '39947', '83333', '224308',
'559292']
# NOTE: these are not all exact mappings..
# Several mappings here are to the closest match which works correctly
# in this setting but isn't generally speaking a valid xref.
taxonomy_to_mesh = {
'9606': 'D006801',
'10090': 'D051379',
'10116': 'D051381',
'9913': 'D002417',
'7955': 'D015027',
'7227': 'D004331',
'6239': 'D017173',
'44689': 'D004023',
'3702': 'D017360',
'39947': 'D012275',
'83333': 'D048168',
'224308': 'D001412',
'559292': 'D012441',
}
mesh_to_taxonomy = {v: k for k, v in taxonomy_to_mesh.items()}
def _download_from_s3(path, base_name):
    """Download ``<__version__>/<base_name>`` from the public ``gilda`` S3
    bucket into directory *path* and return the local file path.

    The client uses unsigned (anonymous) access and disables threaded
    transfers.
    """
    config = botocore.client.Config(signature_version=botocore.UNSIGNED)
    s3 = boto3.client('s3', config=config)
    tc = boto3.s3.transfer.TransferConfig(use_threads=False)
    # objects are keyed by package version so resources match the code
    full_key = '%s/%s' % (__version__, base_name)
    out_file = os.path.join(path, base_name)
    s3.download_file('gilda', full_key, out_file, Config=tc)
    return out_file
def get_grounding_terms():
    """Return the local path to the grounding terms TSV, downloading it
    from S3 first if it is not cached yet."""
    if not os.path.exists(GROUNDING_TERMS_PATH):
        logger.info('Downloading grounding terms from S3.')
        downloaded = _download_from_s3(resource_dir, GROUNDING_TERMS_BASE_NAME)
        logger.info('Saved grounding terms into: %s' % downloaded)
    return GROUNDING_TERMS_PATH
def get_gilda_models():
    """Return the local path to the disambiguation models pickle,
    downloading it from S3 first if it is not cached yet."""
    model_file = 'gilda_models.pkl'
    model_path = os.path.join(resource_dir, model_file)
    if not os.path.exists(model_path):
        logger.info('Downloading disambiguation models from S3.')
        downloaded = _download_from_s3(resource_dir, model_file)
        logger.info('Saved disambiguation models into: %s' % downloaded)
    return model_path
| 32.148649 | 76 | 0.691887 | import os
import boto3
import pystow
import logging
import botocore
from gilda import __version__
logger = logging.getLogger(__name__)
HERE = os.path.abspath(os.path.dirname(__file__))
MESH_MAPPINGS_PATH = os.path.join(HERE, 'mesh_mappings.tsv')
resource_dir = pystow.join('gilda', __version__)
GROUNDING_TERMS_BASE_NAME = 'grounding_terms.tsv'
GROUNDING_TERMS_PATH = os.path.join(resource_dir, GROUNDING_TERMS_BASE_NAME)
popular_organisms = ['9606', '10090', '10116', '9913', '7955', '7227',
'6239', '44689', '3702', '39947', '83333', '224308',
'559292']
taxonomy_to_mesh = {
'9606': 'D006801',
'10090': 'D051379',
'10116': 'D051381',
'9913': 'D002417',
'7955': 'D015027',
'7227': 'D004331',
'6239': 'D017173',
'44689': 'D004023',
'3702': 'D017360',
'39947': 'D012275',
'83333': 'D048168',
'224308': 'D001412',
'559292': 'D012441',
}
mesh_to_taxonomy = {v: k for k, v in taxonomy_to_mesh.items()}
def _download_from_s3(path, base_name):
config = botocore.client.Config(signature_version=botocore.UNSIGNED)
s3 = boto3.client('s3', config=config)
tc = boto3.s3.transfer.TransferConfig(use_threads=False)
full_key = '%s/%s' % (__version__, base_name)
out_file = os.path.join(path, base_name)
s3.download_file('gilda', full_key, out_file, Config=tc)
return out_file
def get_grounding_terms():
base_name = GROUNDING_TERMS_BASE_NAME
full_path = GROUNDING_TERMS_PATH
if not os.path.exists(full_path):
logger.info('Downloading grounding terms from S3.')
out_file = _download_from_s3(resource_dir, base_name)
logger.info('Saved grounding terms into: %s' % out_file)
return full_path
def get_gilda_models():
base_name = 'gilda_models.pkl'
full_path = os.path.join(resource_dir, base_name)
if not os.path.exists(full_path):
logger.info('Downloading disambiguation models from S3.')
out_file = _download_from_s3(resource_dir, base_name)
logger.info('Saved disambiguation models into: %s' % out_file)
return full_path
| true | true |
f733a4ab524b61a9d3221a0d312b6a6eb8c4d96e | 2,394 | py | Python | zoomus/components/report.py | ROMBOTics/zoomus | ee3f8956dcdb0b58367e413bccb6cab0b5b99b83 | [
"Apache-2.0"
] | null | null | null | zoomus/components/report.py | ROMBOTics/zoomus | ee3f8956dcdb0b58367e413bccb6cab0b5b99b83 | [
"Apache-2.0"
] | null | null | null | zoomus/components/report.py | ROMBOTics/zoomus | ee3f8956dcdb0b58367e413bccb6cab0b5b99b83 | [
"Apache-2.0"
] | null | null | null | """Zoom.us REST API Python Client -- Report component"""
from __future__ import absolute_import
from zoomus import util
from zoomus.components import base
class ReportComponent(base.BaseComponent):
    """Component dealing with all report related matters"""
    def get_account_report(self, **kwargs):
        """Fetch the v1 account report for ``start_time``/``end_time``.

        The datetime bounds are rewritten into the ``from``/``to`` string
        parameters the v1 API expects.
        """
        # NOTE(review): the trailing ``kwargs`` argument duplicates the first
        # positional one; confirm against util.require_keys' signature
        # whether it is intentional.
        util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
        kwargs["from"] = util.date_to_str(kwargs["start_time"])
        del kwargs["start_time"]
        kwargs["to"] = util.date_to_str(kwargs["end_time"])
        del kwargs["end_time"]
        return self.post_request("/report/getaccountreport", params=kwargs)
    def get_user_report(self, **kwargs):
        """Fetch the v1 per-user report for ``start_time``/``end_time``."""
        # NOTE(review): same duplicated ``kwargs`` argument as above.
        util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
        kwargs["from"] = util.date_to_str(kwargs["start_time"])
        del kwargs["start_time"]
        kwargs["to"] = util.date_to_str(kwargs["end_time"])
        del kwargs["end_time"]
        return self.post_request("/report/getuserreport", params=kwargs)
    def get_daily_report(self, **kwargs):
        """Fetch the v1 daily report for a given ``month``/``year``."""
        util.require_keys(kwargs, ["month", "year"], kwargs)
        return self.post_request("/report/getdailyreport", params=kwargs)
class ReportComponentV2(base.BaseComponent):
    """Report endpoints of the Zoom v2 REST API."""

    @staticmethod
    def _apply_date_range(kwargs):
        # Rewrite start_time/end_time entries into the from/to strings the
        # v2 API expects (mutates *kwargs* in place).
        kwargs["from"] = util.date_to_str(kwargs.pop("start_time"))
        kwargs["to"] = util.date_to_str(kwargs.pop("end_time"))

    def get_user_report(self, **kwargs):
        """Fetch one user's meetings report over a date range."""
        util.require_keys(kwargs, ["user_id", "start_time", "end_time"])
        self._apply_date_range(kwargs)
        endpoint = "/report/users/{}/meetings".format(kwargs.get("user_id"))
        return self.get_request(endpoint, params=kwargs)

    def get_account_report(self, **kwargs):
        """Fetch the across-users report over a date range."""
        util.require_keys(kwargs, ["start_time", "end_time"])
        self._apply_date_range(kwargs)
        return self.get_request("/report/users", params=kwargs)

    def get_daily_report(self, **kwargs):
        """Fetch the daily report for a given month/year."""
        util.require_keys(kwargs, ["month", "year"])
        return self.get_request("/report/daily", params=kwargs)

    def get_meeting_participants_report(self, **kwargs):
        """Fetch the participants report for a single meeting id."""
        util.require_keys(kwargs, "id")
        endpoint = "/report/meetings/{}/participants".format(kwargs.get("id"))
        return self.get_request(endpoint, params=kwargs)
| 40.576271 | 107 | 0.664996 |
from __future__ import absolute_import
from zoomus import util
from zoomus.components import base
class ReportComponent(base.BaseComponent):
def get_account_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.post_request("/report/getaccountreport", params=kwargs)
def get_user_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"], kwargs)
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.post_request("/report/getuserreport", params=kwargs)
def get_daily_report(self, **kwargs):
util.require_keys(kwargs, ["month", "year"], kwargs)
return self.post_request("/report/getdailyreport", params=kwargs)
class ReportComponentV2(base.BaseComponent):
def get_user_report(self, **kwargs):
util.require_keys(kwargs, ["user_id", "start_time", "end_time"])
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.get_request(
"/report/users/{}/meetings".format(kwargs.get("user_id")), params=kwargs
)
def get_account_report(self, **kwargs):
util.require_keys(kwargs, ["start_time", "end_time"])
kwargs["from"] = util.date_to_str(kwargs["start_time"])
del kwargs["start_time"]
kwargs["to"] = util.date_to_str(kwargs["end_time"])
del kwargs["end_time"]
return self.get_request("/report/users", params=kwargs)
def get_daily_report(self, **kwargs):
util.require_keys(kwargs, ["month", "year"])
return self.get_request("/report/daily", params=kwargs)
def get_meeting_participants_report(self, **kwargs):
util.require_keys(kwargs, "id")
return self.get_request("/report/meetings/{}/participants".format(kwargs.get("id")), params=kwargs)
| true | true |
f733a55a8170ce1efb00e13a130b28157800c40a | 374 | py | Python | test_project/apps/cds/admin.py | int-y1/dmoj-wpadmin | 81a9ccd476830e9467d209ba98d348daca040d2a | [
"MIT"
] | 4 | 2017-11-17T21:42:39.000Z | 2022-02-17T23:35:05.000Z | test_project/apps/cds/admin.py | int-y1/dmoj-wpadmin | 81a9ccd476830e9467d209ba98d348daca040d2a | [
"MIT"
] | 3 | 2017-11-20T18:08:30.000Z | 2019-09-04T19:40:55.000Z | test_project/apps/cds/admin.py | int-y1/dmoj-wpadmin | 81a9ccd476830e9467d209ba98d348daca040d2a | [
"MIT"
] | 9 | 2016-11-15T13:46:00.000Z | 2021-11-09T04:27:01.000Z | from django.contrib import admin
class CdCategoryAdmin(admin.ModelAdmin):
    # Default ModelAdmin behavior; no customizations.
    pass
class CdAdmin(admin.ModelAdmin):
    # Default ModelAdmin behavior; no customizations.
    pass
class UserCdAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        """
        Show only current user's objects.
        """
        # BUG FIX: ModelAdmin.queryset() was renamed to get_queryset() in
        # Django 1.6 and later removed, so ``super(...).queryset(request)``
        # raises AttributeError on any modern Django.  Delegate to the
        # renamed hook instead.
        qs = super(UserCdAdmin, self).get_queryset(request)
        return qs.filter(owner=request.user)
| 17.809524 | 55 | 0.665775 | from django.contrib import admin
class CdCategoryAdmin(admin.ModelAdmin):
pass
class CdAdmin(admin.ModelAdmin):
pass
class UserCdAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        """Limit the changelist to objects owned by the requesting user."""
        # BUG FIX: use get_queryset(); ModelAdmin.queryset() was renamed in
        # Django 1.6 and no longer exists on modern Django.
        qs = super(UserCdAdmin, self).get_queryset(request)
        return qs.filter(owner=request.user)
| true | true |
f733a583fa3db787f40f40a07138e1bc8afad912 | 3,616 | py | Python | python3/barchart3.py | iceihehe/pipeg | c5ed0a3bde23862bc4fffb0751df0bd2c0334a90 | [
"MIT"
] | null | null | null | python3/barchart3.py | iceihehe/pipeg | c5ed0a3bde23862bc4fffb0751df0bd2c0334a90 | [
"MIT"
] | null | null | null | python3/barchart3.py | iceihehe/pipeg | c5ed0a3bde23862bc4fffb0751df0bd2c0334a90 | [
"MIT"
] | 2 | 2020-01-31T15:17:27.000Z | 2020-05-28T13:49:53.000Z | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import abc
import os
import re
import tempfile
import Qtrac
try:
import cyImage as Image
except ImportError:
import Image
def main():
    """Render the same forecast data as a text chart and as an image."""
    data = (("Mon", 16), ("Tue", 17), ("Wed", 19), ("Thu", 22),
            ("Fri", 24), ("Sat", 21), ("Sun", 19))
    # render with the text backend first, then the image backend
    for backend in (TextBarRenderer(), ImageBarRenderer()):
        BarCharter(backend).render("Forecast 6/8", data)
class BarRenderer(Qtrac.Requirer):
    """Interface marker for bar-chart renderers.

    Concrete renderers must provide the four methods listed below;
    presumably Qtrac.Requirer enforces ``required_methods`` -- confirm
    against the Qtrac module.
    """

    required_methods = {"initialize", "draw_caption", "draw_bar",
            "finalize"}
class BarCharter:
    """Drives a BarRenderer to draw a captioned bar chart."""

    def __init__(self, renderer):
        # reject anything that does not satisfy the renderer interface
        if not isinstance(renderer, BarRenderer):
            raise TypeError("Expected object of type BarRenderer, got {}".
                    format(type(renderer).__name__))
        self.__renderer = renderer

    def render(self, caption, pairs):
        """Draw one bar per (name, value) pair, scaled to the maximum value."""
        highest = max(value for _, value in pairs)
        drawer = self.__renderer
        drawer.initialize(len(pairs), highest)
        drawer.draw_caption(caption)
        for label, value in pairs:
            drawer.draw_bar(label, value)
        drawer.finalize()
class TextBarRenderer:
    """Renders a bar chart as rows of '*' characters on stdout."""

    def __init__(self, scaleFactor=40):
        # width in characters of the longest possible bar
        self.scaleFactor = scaleFactor

    def initialize(self, bars, maximum):
        assert bars > 0 and maximum > 0
        # characters per value unit
        self.scale = self.scaleFactor / maximum

    def draw_caption(self, caption):
        underline = "=" * len(caption)
        print("{0:^{2}}\n{1:^{2}}".format(caption, underline,
                                          self.scaleFactor))

    def draw_bar(self, name, value):
        stars = "*" * int(value * self.scale)
        print("{} {}".format(stars, name))

    def finalize(self):
        pass
class ImageBarRenderer:
    """Renders a bar chart as an .xpm image saved in the temp directory."""
    # bars cycle through these colors in order, one color per bar
    COLORS = [Image.color_for_name(name) for name in ("red", "green",
            "blue", "yellow", "magenta", "cyan")]
    def __init__(self, stepHeight=10, barWidth=30, barGap=2):
        # pixel geometry: one value unit is stepHeight pixels of bar height
        self.stepHeight = stepHeight
        self.barWidth = barWidth
        self.barGap = barGap
    def initialize(self, bars, maximum):
        """Allocate a white canvas sized to hold *bars* bars of up to
        *maximum* value units."""
        assert bars > 0 and maximum > 0
        self.index = 0
        color = Image.color_for_name("white")
        self.image = Image.Image(bars * (self.barWidth + self.barGap),
                maximum * self.stepHeight, background=color)
    def draw_caption(self, caption):
        # the caption is only used to derive the output filename
        self.filename = os.path.join(tempfile.gettempdir(),
                re.sub(r"\W+", "_", caption) + ".xpm")
    def draw_bar(self, name, value):
        """Draw the next bar; ``name`` is unused for image output."""
        color = ImageBarRenderer.COLORS[self.index %
                len(ImageBarRenderer.COLORS)]
        width, height = self.image.size
        # bars grow upward from the bottom edge of the image
        x0 = self.index * (self.barWidth + self.barGap)
        x1 = x0 + self.barWidth
        y0 = height - (value * self.stepHeight)
        y1 = height - 1
        self.image.rectangle(x0, y0, x1, y1, fill=color)
        self.index += 1
    def finalize(self):
        # write the accumulated image to the file named by draw_caption
        self.image.save(self.filename)
        print("wrote", self.filename)
if __name__ == "__main__":
main()
| 29.398374 | 74 | 0.634956 |
import abc
import os
import re
import tempfile
import Qtrac
try:
import cyImage as Image
except ImportError:
import Image
def main():
pairs = (("Mon", 16), ("Tue", 17), ("Wed", 19), ("Thu", 22),
("Fri", 24), ("Sat", 21), ("Sun", 19))
textBarCharter = BarCharter(TextBarRenderer())
textBarCharter.render("Forecast 6/8", pairs)
imageBarCharter = BarCharter(ImageBarRenderer())
imageBarCharter.render("Forecast 6/8", pairs)
class BarRenderer(Qtrac.Requirer):
required_methods = {"initialize", "draw_caption", "draw_bar",
"finalize"}
class BarCharter:
def __init__(self, renderer):
if not isinstance(renderer, BarRenderer):
raise TypeError("Expected object of type BarRenderer, got {}".
format(type(renderer).__name__))
self.__renderer = renderer
def render(self, caption, pairs):
maximum = max(value for _, value in pairs)
self.__renderer.initialize(len(pairs), maximum)
self.__renderer.draw_caption(caption)
for name, value in pairs:
self.__renderer.draw_bar(name, value)
self.__renderer.finalize()
class TextBarRenderer:
def __init__(self, scaleFactor=40):
self.scaleFactor = scaleFactor
def initialize(self, bars, maximum):
assert bars > 0 and maximum > 0
self.scale = self.scaleFactor / maximum
def draw_caption(self, caption):
print("{0:^{2}}\n{1:^{2}}".format(caption, "=" * len(caption),
self.scaleFactor))
def draw_bar(self, name, value):
print("{} {}".format("*" * int(value * self.scale), name))
def finalize(self):
pass
class ImageBarRenderer:
COLORS = [Image.color_for_name(name) for name in ("red", "green",
"blue", "yellow", "magenta", "cyan")]
def __init__(self, stepHeight=10, barWidth=30, barGap=2):
self.stepHeight = stepHeight
self.barWidth = barWidth
self.barGap = barGap
def initialize(self, bars, maximum):
assert bars > 0 and maximum > 0
self.index = 0
color = Image.color_for_name("white")
self.image = Image.Image(bars * (self.barWidth + self.barGap),
maximum * self.stepHeight, background=color)
def draw_caption(self, caption):
self.filename = os.path.join(tempfile.gettempdir(),
re.sub(r"\W+", "_", caption) + ".xpm")
def draw_bar(self, name, value):
color = ImageBarRenderer.COLORS[self.index %
len(ImageBarRenderer.COLORS)]
width, height = self.image.size
x0 = self.index * (self.barWidth + self.barGap)
x1 = x0 + self.barWidth
y0 = height - (value * self.stepHeight)
y1 = height - 1
self.image.rectangle(x0, y0, x1, y1, fill=color)
self.index += 1
def finalize(self):
self.image.save(self.filename)
print("wrote", self.filename)
if __name__ == "__main__":
main()
| true | true |
f733a58f07b491a8ffaa457c1e4b312d3027bea5 | 473 | py | Python | examples/sklearn_logistic_regression/train.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | null | null | null | examples/sklearn_logistic_regression/train.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | 1 | 2021-01-24T13:34:51.000Z | 2021-01-24T13:34:51.000Z | examples/sklearn_logistic_regression/train.py | iPieter/kiwi | 76b66872fce68873809a0dea112e2ed552ae5b63 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from sklearn.linear_model import LogisticRegression
import kiwi
import kiwi.sklearn
if __name__ == "__main__":
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression()
lr.fit(X, y)
score = lr.score(X, y)
print("Score: %s" % score)
kiwi.log_metric("score", score)
kiwi.sklearn.log_model(lr, "model")
print("Model saved in run %s" % kiwi.active_run().info.run_uuid)
| 27.823529 | 68 | 0.632135 | import numpy as np
from sklearn.linear_model import LogisticRegression
import kiwi
import kiwi.sklearn
if __name__ == "__main__":
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression()
lr.fit(X, y)
score = lr.score(X, y)
print("Score: %s" % score)
kiwi.log_metric("score", score)
kiwi.sklearn.log_model(lr, "model")
print("Model saved in run %s" % kiwi.active_run().info.run_uuid)
| true | true |
f733a5c43dfb3635c0debe9c082e090f349107b6 | 19,762 | py | Python | pandas/core/computation/pytables.py | cf-vrgl/pandas | 6f18ef68903591a18507f42763c862333d5470d9 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-08-06T14:27:43.000Z | 2021-08-06T14:27:56.000Z | pandas/core/computation/pytables.py | ra1nty/pandas | 0b68d87a4438a13f14a2ed5af2e432df02eb0b2c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/computation/pytables.py | ra1nty/pandas | 0b68d87a4438a13f14a2ed5af2e432df02eb0b2c | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-22T14:36:40.000Z | 2021-06-22T14:36:40.000Z | """ manage PyTables query interface via Expressions """
from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import (
UndefinedVariableError,
is_term,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesScope(_scope.Scope):
    """Variable scope that additionally carries the table's queryables map."""

    __slots__ = ("queryables",)

    queryables: dict[str, Any]

    def __init__(
        self, level: int, global_dict=None, local_dict=None,
        queryables: dict[str, Any] | None = None,
    ):
        # bump the level so resolution starts at the caller's frame
        super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
        self.queryables = queryables or {}
class Term(ops.Term):
    """A named term in a PyTables expression.

    ``__new__`` dispatches on the name's type: a str stays a Term (a
    reference that may name a queryable column), anything else becomes a
    Constant.
    """
    env: PyTablesScope
    def __new__(cls, name, env, side=None, encoding=None):
        if isinstance(name, str):
            klass = cls
        else:
            klass = Constant
        return object.__new__(klass)
    def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
        super().__init__(name, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # must be a queryables
        if self.side == "left":
            # Note: The behavior of __new__ ensures that self.name is a str here
            if self.name not in self.env.queryables:
                raise NameError(f"name {repr(self.name)} is not defined")
            return self.name
        # resolve the rhs (and allow it to be None)
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            return self.name
    # read-only property overwriting read/write property
    @property  # type: ignore[misc]
    def value(self):
        return self._value
class Constant(Term):
    """A literal value appearing in a PyTables expression."""
    def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
        assert isinstance(env, PyTablesScope), type(env)
        super().__init__(value, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # a constant resolves to itself; no scope lookup is needed
        return self._name
class BinOp(ops.BinOp):
    """A binary operator node in a PyTables where-expression.

    ``prune`` specializes a generic BinOp into ConditionBinOp / FilterBinOp
    (or their Joint* combinations) once it is known whether the comparison
    can be pushed down as a numexpr condition or must be applied as a
    post-read filter.
    """
    # maximum number of ==/!= values representable in one condition string
    _max_selectors = 31
    op: str
    queryables: dict[str, Any]
    condition: str | None
    def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):
        super().__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        self.condition = None
    def _disallow_scalar_only_bool_ops(self):
        # scalar-only boolean ops are acceptable here; disable the
        # base-class restriction with a no-op
        pass
    def prune(self, klass):
        """Collapse this node into an evaluated *klass* instance (or None)
        by recursively pruning its operands."""
        def pr(left, right):
            """create and return a new specialized BinOp from myself"""
            if left is None:
                return right
            elif right is None:
                return left
            k = klass
            if isinstance(left, ConditionBinOp):
                if isinstance(right, ConditionBinOp):
                    k = JointConditionBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            elif isinstance(left, FilterBinOp):
                if isinstance(right, FilterBinOp):
                    k = JointFilterBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            return k(
                self.op, left, right, queryables=self.queryables, encoding=self.encoding
            ).evaluate()
        left, right = self.lhs, self.rhs
        if is_term(left) and is_term(right):
            res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
            res = pr(left.prune(klass), right.value)
        elif is_term(left) and not is_term(right):
            res = pr(left.value, right.prune(klass))
        elif not (is_term(left) or is_term(right)):
            res = pr(left.prune(klass), right.prune(klass))
        return res
    def conform(self, rhs):
        """inplace conform rhs"""
        if not is_list_like(rhs):
            rhs = [rhs]
        if isinstance(rhs, np.ndarray):
            rhs = rhs.ravel()
        return rhs
    @property
    def is_valid(self) -> bool:
        """return True if this is a valid field"""
        return self.lhs in self.queryables
    @property
    def is_in_table(self) -> bool:
        """
        return True if this is a valid column name for generation (e.g. an
        actual column in the table)
        """
        return self.queryables.get(self.lhs) is not None
    @property
    def kind(self):
        """the kind of my field"""
        return getattr(self.queryables.get(self.lhs), "kind", None)
    @property
    def meta(self):
        """the meta of my field"""
        return getattr(self.queryables.get(self.lhs), "meta", None)
    @property
    def metadata(self):
        """the metadata of my field"""
        return getattr(self.queryables.get(self.lhs), "metadata", None)
    def generate(self, v) -> str:
        """create and return the op string for this TermValue"""
        val = v.tostring(self.encoding)
        return f"({self.lhs} {self.op} {val})"
    def convert_value(self, v) -> TermValue:
        """
        convert the expression that is in the term to something that is
        accepted by pytables
        """
        def stringify(value):
            if self.encoding is not None:
                return pprint_thing_encoded(value, encoding=self.encoding)
            return pprint_thing(value)
        kind = ensure_decoded(self.kind)
        meta = ensure_decoded(self.meta)
        if kind == "datetime64" or kind == "datetime":
            # timestamps are stored/compared in UTC
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = ensure_decoded(v)
            v = Timestamp(v)
            if v.tz is not None:
                v = v.tz_convert("UTC")
            return TermValue(v, v.value, kind)
        elif kind == "timedelta64" or kind == "timedelta":
            # non-string values are interpreted as seconds
            if isinstance(v, str):
                v = Timedelta(v).value
            else:
                v = Timedelta(v, unit="s").value
            return TermValue(int(v), v, kind)
        elif meta == "category":
            # categoricals compare by code; -1 means "value not present"
            metadata = extract_array(self.metadata, extract_numpy=True)
            if v not in metadata:
                result = -1
            else:
                # error: Incompatible types in assignment (expression has type
                # "Union[Any, ndarray]", variable has type "int")
                result = metadata.searchsorted(  # type: ignore[assignment]
                    v, side="left"
                )
            return TermValue(result, result, "integer")
        elif kind == "integer":
            v = int(float(v))
            return TermValue(v, v, kind)
        elif kind == "float":
            v = float(v)
            return TermValue(v, v, kind)
        elif kind == "bool":
            if isinstance(v, str):
                # these (case-insensitive, stripped) strings count as False
                v = not v.strip().lower() in [
                    "false",
                    "f",
                    "no",
                    "n",
                    "none",
                    "0",
                    "[]",
                    "{}",
                    "",
                ]
            else:
                v = bool(v)
            return TermValue(v, v, kind)
        elif isinstance(v, str):
            # string quoting
            return TermValue(v, stringify(v), "string")
        else:
            raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
    def convert_values(self):
        pass
class FilterBinOp(BinOp):
    """A comparison applied as a post-read filter rather than pushed down
    to the table as a numexpr condition."""
    # (column name, filter callable, candidate values) once evaluated
    filter: tuple[Any, Any, Index] | None = None
    def __repr__(self) -> str:
        if self.filter is None:
            return "Filter: Not Initialized"
        return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
    def invert(self):
        """invert the filter"""
        if self.filter is not None:
            self.filter = (
                self.filter[0],
                self.generate_filter_op(invert=True),
                self.filter[2],
            )
        return self
    def format(self):
        """return the actual filter format"""
        return [self.filter]
    def evaluate(self):
        """Resolve into a filter tuple; return None when the comparison
        should instead be handled as a condition, raise when it is neither."""
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        rhs = self.conform(self.rhs)
        values = list(rhs)
        if self.is_in_table:
            # if too many values to create the expression, use a filter instead
            if self.op in ["==", "!="] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (self.lhs, filter_op, Index(values))
                return self
            return None
        # equality conditions
        if self.op in ["==", "!="]:
            filter_op = self.generate_filter_op()
            self.filter = (self.lhs, filter_op, Index(values))
        else:
            raise TypeError(
                f"passing a filterable condition to a non-table indexer [{self}]"
            )
        return self
    def generate_filter_op(self, invert: bool = False):
        # "!=" (or an inverted "==") excludes the listed values
        if (self.op == "!=" and not invert) or (self.op == "==" and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    """Two filters joined by a boolean op; cannot be collapsed further."""
    def format(self):
        raise NotImplementedError("unable to collapse Joint Filters")
    def evaluate(self):
        # operands were already evaluated when this node was built
        return self
class ConditionBinOp(BinOp):
    """A comparison expressible as a numexpr condition string."""
    def __repr__(self) -> str:
        return pprint_thing(f"[Condition : [{self.condition}]]")
    def invert(self):
        """invert the condition"""
        # if self.condition is not None:
        #     self.condition = "~(%s)" % self.condition
        # return self
        raise NotImplementedError(
            "cannot use an invert condition when passing to numexpr"
        )
    def format(self):
        """return the actual ne format"""
        return self.condition
    def evaluate(self):
        """Build ``self.condition``; return self, or None when the
        comparison must be handled as a filter instead."""
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        # convert values if we are in the table
        if not self.is_in_table:
            return None
        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]
        # equality conditions
        if self.op in ["==", "!="]:
            # too many values to create the expression?
            if len(values) <= self._max_selectors:
                vs = [self.generate(v) for v in values]
                self.condition = f"({' | '.join(vs)})"
            # use a filter after reading
            else:
                return None
        else:
            self.condition = self.generate(values[0])
        return self
class JointConditionBinOp(ConditionBinOp):
    """Two already-built conditions joined by a boolean op."""
    def evaluate(self):
        # both operands carry ready condition strings; just combine them
        self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
        return self
class UnaryOp(ops.UnaryOp):
    """A unary operator node; only inversion ('~') is supported."""

    def prune(self, klass):
        """Prune the operand and return its inversion, or None when the
        pruned operand cannot be inverted as a *klass*."""
        if self.op != "~":
            raise NotImplementedError("UnaryOp only support invert type ops")
        pruned = self.operand.prune(klass)
        if pruned is None:
            return None
        # invert only when the pruned node actually produced something
        # usable for the requested specialization
        if issubclass(klass, ConditionBinOp):
            if pruned.condition is not None:
                return pruned.invert()
        elif issubclass(klass, FilterBinOp):
            if pruned.filter is not None:
                return pruned.invert()
        return None
class PyTablesExprVisitor(BaseExprVisitor):
    """AST visitor that builds PyTables Term/Constant/BinOp nodes instead
    of the generic computation ops."""
    const_type = Constant
    term_type = Term
    def __init__(self, env, engine, parser, **kwargs):
        super().__init__(env, engine, parser)
        # every binary op constructs a BinOp carrying queryables/encoding
        for bin_op in self.binary_ops:
            bin_node = self.binary_op_nodes_map[bin_op]
            setattr(
                self,
                f"visit_{bin_node}",
                lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
            )
    def visit_UnaryOp(self, node, **kwargs):
        # ``not``/``~`` become inversion; unary minus folds into a Constant
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp("~", self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError("Unary addition not supported")
    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value
    def visit_Assign(self, node, **kwargs):
        # rewrite ``col = value`` as the comparison ``col == value``
        cmpr = ast.Compare(
            ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
        )
        return self.visit(cmpr)
    def visit_Subscript(self, node, **kwargs):
        # only allow simple subscripts
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            value = value.value
        except AttributeError:
            pass
        if isinstance(slobj, Term):
            # In py39 np.ndarray lookups with Term containing int raise
            slobj = slobj.value
        try:
            return self.const_type(value[slobj], self.env)
        except TypeError as err:
            raise ValueError(
                f"cannot subscript {repr(value)} with {repr(slobj)}"
            ) from err
    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = type(node.ctx)
        if ctx == ast.Load:
            # resolve the value
            resolved = self.visit(value)
            # try to get the value to see if we are another expression
            try:
                resolved = resolved.value
            except (AttributeError):
                pass
            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overridden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError(f"Invalid Attribute context {ctx.__name__}")
    def translate_In(self, op):
        # membership is treated as equality against the listed values
        return ast.Eq() if isinstance(op, ast.In) else op
    def _rewrite_membership_op(self, node, left, right):
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
    """
    Validate that the where statement is of the right type.

    The type may either be String, Expr, or list-like of Exprs.

    Parameters
    ----------
    w : String term expression, Expr, or list-like of Exprs.

    Returns
    -------
    where : The original where clause if the check was successful.

    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    if isinstance(w, (PyTablesExpr, str)) or is_list_like(w):
        return w
    raise TypeError(
        "where must be passed as a string, PyTablesExpr, "
        "or list-like of PyTablesExpr"
    )
class PyTablesExpr(expr.Expr):
    """
    Hold a pytables-like expression, comprised of possibly multiple 'terms'.

    Parameters
    ----------
    where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms

    Returns
    -------
    a PyTablesExpr object

    Examples
    --------
    'index>=date'
    "columns=['A', 'D']"
    'columns=A'
    'columns==A'
    "~(columns=['A','B'])"
    'index>df.index[3] & string="bar"'
    '(index>df.index[3] & index<=df.index[6]) | string="bar"'
    "ts>=Timestamp('2012-02-01')"
    "major_axis>=20130101"
    """
    _visitor: PyTablesExprVisitor | None
    env: PyTablesScope
    expr: str
    def __init__(
        self,
        where,
        queryables: dict[str, Any] | None = None,
        encoding=None,
        scope_level: int = 0,
    ):
        where = _validate_where(where)
        self.encoding = encoding
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None
        # capture the environment if needed
        local_dict: DeepChainMap[Any, Any] = DeepChainMap()
        if isinstance(where, PyTablesExpr):
            # reuse the scope captured by the other expression
            local_dict = where.env.scope
            _where = where.expr
        elif is_list_like(where):
            # combine multiple clauses with '&'
            where = list(where)
            for idx, w in enumerate(where):
                if isinstance(w, PyTablesExpr):
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            _where = " & ".join(f"({w})" for w in com.flatten(where))
        else:
            # _validate_where ensures we otherwise have a string
            _where = where
        self.expr = _where
        self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
        if queryables is not None and isinstance(self.expr, str):
            # only parse when we know the table's queryable columns
            self.env.queryables.update(queryables)
            self._visitor = PyTablesExprVisitor(
                self.env,
                queryables=queryables,
                parser="pytables",
                engine="pytables",
                encoding=encoding,
            )
            self.terms = self.parse()
    def __repr__(self) -> str:
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)
    def evaluate(self):
        """create and return the numexpr condition and filter"""
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid condition"
            ) from err
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid filter"
            ) from err
        return self.condition, self.filter
class TermValue:
    """Hold a term value that we use to construct a condition/filter."""

    def __init__(self, value, converted, kind: str):
        assert isinstance(kind, str), kind
        self.value = value
        self.converted = converted
        self.kind = kind

    def tostring(self, encoding) -> str:
        """quote the string if not encoded else encode and return"""
        converted = self.converted
        if self.kind == "string":
            return str(converted) if encoding is not None else f'"{converted}"'
        if self.kind == "float":
            # repr() round-trips floats reliably; str() may not
            return repr(converted)
        return str(converted)
def maybe_expression(s) -> bool:
    """Loose check whether *s* looks like a pytables-acceptable expression."""
    if not isinstance(s, str):
        return False
    operators = (
        PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
    )
    # require at least one operator to be present
    return any(op in s for op in operators)
| 30.125 | 88 | 0.565985 | from __future__ import annotations
import ast
from functools import partial
from typing import Any
import numpy as np
from pandas._libs.tslibs import (
Timedelta,
Timestamp,
)
from pandas.compat.chainmap import DeepChainMap
from pandas.core.dtypes.common import is_list_like
import pandas.core.common as com
from pandas.core.computation import (
expr,
ops,
scope as _scope,
)
from pandas.core.computation.common import ensure_decoded
from pandas.core.computation.expr import BaseExprVisitor
from pandas.core.computation.ops import (
UndefinedVariableError,
is_term,
)
from pandas.core.construction import extract_array
from pandas.core.indexes.base import Index
from pandas.io.formats.printing import (
pprint_thing,
pprint_thing_encoded,
)
class PyTablesScope(_scope.Scope):
    """Scope that additionally tracks the queryable columns of a table."""
    __slots__ = ("queryables",)
    # mapping of column name -> "kind" for indexable columns
    queryables: dict[str, Any]
    def __init__(
        self,
        level: int,
        global_dict=None,
        local_dict=None,
        queryables: dict[str, Any] | None = None,
    ):
        super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict)
        self.queryables = queryables or {}
class Term(ops.Term):
    """A pytables term; non-string names are dispatched to Constant."""
    env: PyTablesScope
    def __new__(cls, name, env, side=None, encoding=None):
        # string names are resolvable identifiers; anything else is a literal
        if isinstance(name, str):
            klass = cls
        else:
            klass = Constant
        return object.__new__(klass)
    def __init__(self, name, env: PyTablesScope, side=None, encoding=None):
        super().__init__(name, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # left-hand-side names must refer to a queryable column
        if self.side == "left":
            if self.name not in self.env.queryables:
                raise NameError(f"name {repr(self.name)} is not defined")
            return self.name
        try:
            return self.env.resolve(self.name, is_local=False)
        except UndefinedVariableError:
            # fall back to the raw name (e.g. treated as a literal)
            return self.name
    @property
    def value(self):
        return self._value
class Constant(Term):
    """A literal value appearing in a pytables expression."""
    def __init__(self, value, env: PyTablesScope, side=None, encoding=None):
        assert isinstance(env, PyTablesScope), type(env)
        super().__init__(value, env, side=side, encoding=encoding)
    def _resolve_name(self):
        # constants resolve to themselves
        return self._name
class BinOp(ops.BinOp):
    """Binary operation over pytables terms; specialized via prune()."""
    # max number of selectors usable in a single generated condition
    _max_selectors = 31
    op: str
    queryables: dict[str, Any]
    condition: str | None
    def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding):
        super().__init__(op, lhs, rhs)
        self.queryables = queryables
        self.encoding = encoding
        self.condition = None
    def _disallow_scalar_only_bool_ops(self):
        # scalar-only boolean ops are allowed for pytables expressions
        pass
    def prune(self, klass):
        """Collapse this op tree into a single ``klass`` instance (or None)."""
        def pr(left, right):
            """create and return a new specialized BinOp from myself"""
            if left is None:
                return right
            elif right is None:
                return left
            k = klass
            if isinstance(left, ConditionBinOp):
                if isinstance(right, ConditionBinOp):
                    # two conditions combine into a joint condition
                    k = JointConditionBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            elif isinstance(left, FilterBinOp):
                if isinstance(right, FilterBinOp):
                    # two filters combine into a joint filter
                    k = JointFilterBinOp
                elif isinstance(left, k):
                    return left
                elif isinstance(right, k):
                    return right
            return k(
                self.op, left, right, queryables=self.queryables, encoding=self.encoding
            ).evaluate()
        left, right = self.lhs, self.rhs
        if is_term(left) and is_term(right):
            res = pr(left.value, right.value)
        elif not is_term(left) and is_term(right):
            res = pr(left.prune(klass), right.value)
        elif is_term(left) and not is_term(right):
            res = pr(left.value, right.prune(klass))
        elif not (is_term(left) or is_term(right)):
            res = pr(left.prune(klass), right.prune(klass))
        return res
    def conform(self, rhs):
        """Conform rhs to a flat list of values."""
        if not is_list_like(rhs):
            rhs = [rhs]
        if isinstance(rhs, np.ndarray):
            rhs = rhs.ravel()
        return rhs
    @property
    def is_valid(self) -> bool:
        """return True if this is a valid field"""
        return self.lhs in self.queryables
    @property
    def is_in_table(self) -> bool:
        """return True if this is an actual column in the table"""
        return self.queryables.get(self.lhs) is not None
    @property
    def kind(self):
        """the kind of my field"""
        return getattr(self.queryables.get(self.lhs), "kind", None)
    @property
    def meta(self):
        """the meta of my field"""
        return getattr(self.queryables.get(self.lhs), "meta", None)
    @property
    def metadata(self):
        """the metadata of my field"""
        return getattr(self.queryables.get(self.lhs), "metadata", None)
    def generate(self, v) -> str:
        """create and return the op string for this TermValue"""
        val = v.tostring(self.encoding)
        return f"({self.lhs} {self.op} {val})"
    def convert_value(self, v) -> TermValue:
        """Convert the term value into something accepted by pytables."""
        def stringify(value):
            if self.encoding is not None:
                return pprint_thing_encoded(value, encoding=self.encoding)
            return pprint_thing(value)
        kind = ensure_decoded(self.kind)
        meta = ensure_decoded(self.meta)
        if kind == "datetime64" or kind == "datetime":
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = ensure_decoded(v)
            v = Timestamp(v)
            if v.tz is not None:
                # normalize tz-aware stamps to UTC before comparison
                v = v.tz_convert("UTC")
            return TermValue(v, v.value, kind)
        elif kind == "timedelta64" or kind == "timedelta":
            if isinstance(v, str):
                v = Timedelta(v).value
            else:
                v = Timedelta(v, unit="s").value
            return TermValue(int(v), v, kind)
        elif meta == "category":
            metadata = extract_array(self.metadata, extract_numpy=True)
            if v not in metadata:
                # unknown category -> sentinel that matches nothing
                result = -1
            else:
                result = metadata.searchsorted(
                    v, side="left"
                )
            return TermValue(result, result, "integer")
        elif kind == "integer":
            v = int(float(v))
            return TermValue(v, v, kind)
        elif kind == "float":
            v = float(v)
            return TermValue(v, v, kind)
        elif kind == "bool":
            if isinstance(v, str):
                # any string not in the falsy list counts as True
                v = not v.strip().lower() in [
                    "false",
                    "f",
                    "no",
                    "n",
                    "none",
                    "0",
                    "[]",
                    "{}",
                    "",
                ]
            else:
                v = bool(v)
            return TermValue(v, v, kind)
        elif isinstance(v, str):
            # string quoting
            return TermValue(v, stringify(v), "string")
        else:
            raise TypeError(f"Cannot compare {v} of type {type(v)} to {kind} column")
    def convert_values(self):
        pass
class FilterBinOp(BinOp):
    """BinOp realized as an in-memory filter: (column, op, values)."""
    filter: tuple[Any, Any, Index] | None = None
    def __repr__(self) -> str:
        if self.filter is None:
            return "Filter: Not Initialized"
        return pprint_thing(f"[Filter : [{self.filter[0]}] -> [{self.filter[1]}]")
    def invert(self):
        """invert the filter"""
        if self.filter is not None:
            self.filter = (
                self.filter[0],
                self.generate_filter_op(invert=True),
                self.filter[2],
            )
        return self
    def format(self):
        """return the actual filter format"""
        return [self.filter]
    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        rhs = self.conform(self.rhs)
        values = list(rhs)
        if self.is_in_table:
            # if too many values to build a condition, use a filter instead
            if self.op in ["==", "!="] and len(values) > self._max_selectors:
                filter_op = self.generate_filter_op()
                self.filter = (self.lhs, filter_op, Index(values))
                return self
            return None
        # equality conditions on non-table columns become filters
        if self.op in ["==", "!="]:
            filter_op = self.generate_filter_op()
            self.filter = (self.lhs, filter_op, Index(values))
        else:
            raise TypeError(
                f"passing a filterable condition to a non-table indexer [{self}]"
            )
        return self
    def generate_filter_op(self, invert: bool = False):
        # ``!=`` (or inverted ``==``) becomes a negated isin
        if (self.op == "!=" and not invert) or (self.op == "==" and invert):
            return lambda axis, vals: ~axis.isin(vals)
        else:
            return lambda axis, vals: axis.isin(vals)
class JointFilterBinOp(FilterBinOp):
    """Conjunction of two filters; cannot be collapsed to a single one."""
    def format(self):
        raise NotImplementedError("unable to collapse Joint Filters")
    def evaluate(self):
        return self
class ConditionBinOp(BinOp):
    """BinOp realized as a numexpr condition string."""
    def __repr__(self) -> str:
        return pprint_thing(f"[Condition : [{self.condition}]]")
    def invert(self):
        """invert the condition"""
        raise NotImplementedError(
            "cannot use an invert condition when passing to numexpr"
        )
    def format(self):
        """return the actual ne format"""
        return self.condition
    def evaluate(self):
        if not self.is_valid:
            raise ValueError(f"query term is not valid [{self}]")
        # conditions can only be generated for actual table columns
        if not self.is_in_table:
            return None
        rhs = self.conform(self.rhs)
        values = [self.convert_value(v) for v in rhs]
        if self.op in ["==", "!="]:
            # too many values -> give up and let a filter handle it
            if len(values) <= self._max_selectors:
                vs = [self.generate(v) for v in values]
                self.condition = f"({' | '.join(vs)})"
            else:
                return None
        else:
            self.condition = self.generate(values[0])
        return self
class JointConditionBinOp(ConditionBinOp):
    """Combine two already-evaluated conditions with a boolean op."""
    def evaluate(self):
        self.condition = f"({self.lhs.condition} {self.op} {self.rhs.condition})"
        return self
class UnaryOp(ops.UnaryOp):
    """Unary op for pytables expressions; only inversion (``~``) is supported."""
    def prune(self, klass):
        if self.op != "~":
            raise NotImplementedError("UnaryOp only support invert type ops")
        operand = self.operand
        operand = operand.prune(klass)
        # only invert an operand that actually produced a condition/filter
        if operand is not None and (
            issubclass(klass, ConditionBinOp)
            and operand.condition is not None
            or not issubclass(klass, ConditionBinOp)
            and issubclass(klass, FilterBinOp)
            and operand.filter is not None
        ):
            return operand.invert()
        return None
class PyTablesExprVisitor(BaseExprVisitor):
    """AST visitor that produces pytables Terms/Constants and BinOps."""
    const_type = Constant
    term_type = Term
    def __init__(self, env, engine, parser, **kwargs):
        super().__init__(env, engine, parser)
        # route every binary-op AST node to a partially applied pytables BinOp
        for bin_op in self.binary_ops:
            bin_node = self.binary_op_nodes_map[bin_op]
            setattr(
                self,
                f"visit_{bin_node}",
                lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs),
            )
    def visit_UnaryOp(self, node, **kwargs):
        if isinstance(node.op, (ast.Not, ast.Invert)):
            return UnaryOp("~", self.visit(node.operand))
        elif isinstance(node.op, ast.USub):
            # fold unary minus into the constant
            return self.const_type(-self.visit(node.operand).value, self.env)
        elif isinstance(node.op, ast.UAdd):
            raise NotImplementedError("Unary addition not supported")
    def visit_Index(self, node, **kwargs):
        return self.visit(node.value).value
    def visit_Assign(self, node, **kwargs):
        # rewrite ``col = value`` as the comparison ``col == value``
        cmpr = ast.Compare(
            ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]
        )
        return self.visit(cmpr)
    def visit_Subscript(self, node, **kwargs):
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        try:
            value = value.value
        except AttributeError:
            pass
        if isinstance(slobj, Term):
            # unwrap Term subscripts down to their raw value
            slobj = slobj.value
        try:
            return self.const_type(value[slobj], self.env)
        except TypeError as err:
            raise ValueError(
                f"cannot subscript {repr(value)} with {repr(slobj)}"
            ) from err
    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = type(node.ctx)
        if ctx == ast.Load:
            # resolve the value, then unwrap nested expressions
            resolved = self.visit(value)
            try:
                resolved = resolved.value
            except (AttributeError):
                pass
            try:
                return self.term_type(getattr(resolved, attr), self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overridden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError(f"Invalid Attribute context {ctx.__name__}")
    def translate_In(self, op):
        # ``in`` maps onto equality for pytables queries
        return ast.Eq() if isinstance(op, ast.In) else op
    def _rewrite_membership_op(self, node, left, right):
        return self.visit(node.op), node.op, left, right
def _validate_where(w):
    """
    Validate that the where statement is of the right type.

    The type may either be String, Expr, or list-like of Exprs.

    Raises
    ------
    TypeError : An invalid data type was passed in for w (e.g. dict).
    """
    if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)):
        raise TypeError(
            "where must be passed as a string, PyTablesExpr, "
            "or list-like of PyTablesExpr"
        )
    return w
class PyTablesExpr(expr.Expr):
    """
    Hold a pytables-like expression, comprised of possibly multiple 'terms'.

    Parameters
    ----------
    where : string term expression, PyTablesExpr, or list-like of PyTablesExprs
    queryables : a "kinds" map (dict of column name -> kind), or None if column
        is non-indexable
    encoding : an encoding that will encode the query terms
    """
    # visitor is built lazily in __init__, only when queryables are supplied
    _visitor: PyTablesExprVisitor | None
    env: PyTablesScope
    expr: str
    def __init__(
        self,
        where,
        queryables: dict[str, Any] | None = None,
        encoding=None,
        scope_level: int = 0,
    ):
        where = _validate_where(where)
        self.encoding = encoding
        self.condition = None
        self.filter = None
        self.terms = None
        self._visitor = None
        # capture the environment if needed
        local_dict: DeepChainMap[Any, Any] = DeepChainMap()
        if isinstance(where, PyTablesExpr):
            # reuse the scope captured by an existing expression
            local_dict = where.env.scope
            _where = where.expr
        elif is_list_like(where):
            where = list(where)
            for idx, w in enumerate(where):
                if isinstance(w, PyTablesExpr):
                    local_dict = w.env.scope
                else:
                    w = _validate_where(w)
                    where[idx] = w
            # AND together all sub-expressions
            _where = " & ".join(f"({w})" for w in com.flatten(where))
        else:
            # _validate_where ensures we otherwise have a string
            _where = where
        self.expr = _where
        self.env = PyTablesScope(scope_level + 1, local_dict=local_dict)
        if queryables is not None and isinstance(self.expr, str):
            self.env.queryables.update(queryables)
            self._visitor = PyTablesExprVisitor(
                self.env,
                queryables=queryables,
                parser="pytables",
                engine="pytables",
                encoding=encoding,
            )
            self.terms = self.parse()
    def __repr__(self) -> str:
        # prefer the parsed terms; fall back to the raw expression string
        if self.terms is not None:
            return pprint_thing(self.terms)
        return pprint_thing(self.expr)
    def evaluate(self):
        """create and return the numexpr condition and filter"""
        try:
            self.condition = self.terms.prune(ConditionBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid condition"
            ) from err
        try:
            self.filter = self.terms.prune(FilterBinOp)
        except AttributeError as err:
            raise ValueError(
                f"cannot process expression [{self.expr}], [{self}] "
                "is not a valid filter"
            ) from err
        return self.condition, self.filter
class TermValue:
    """Hold a term value used to construct a condition/filter."""
    def __init__(self, value, converted, kind: str):
        assert isinstance(kind, str), kind
        self.value = value
        self.converted = converted
        self.kind = kind
    def tostring(self, encoding) -> str:
        """quote the string if not encoded else encode and return"""
        if self.kind == "string":
            if encoding is not None:
                return str(self.converted)
            return f'"{self.converted}"'
        elif self.kind == "float":
            # repr() round-trips floats reliably; str() may not
            return repr(self.converted)
        return str(self.converted)
def maybe_expression(s) -> bool:
    """Loose check whether *s* looks like a pytables-acceptable expression."""
    if not isinstance(s, str):
        return False
    ops = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ("=",)
    # require at least one operator to be present
    return any(op in s for op in ops)
| true | true |
f733a73235b8fa505092ef970dc371a7512d47b3 | 67 | py | Python | classes/a/b.py | yubang/urlHander | 214ff33a9b6e96adf41a0176a86a62e0be335ef0 | [
"Apache-2.0"
] | null | null | null | classes/a/b.py | yubang/urlHander | 214ff33a9b6e96adf41a0176a86a62e0be335ef0 | [
"Apache-2.0"
] | null | null | null | classes/a/b.py | yubang/urlHander | 214ff33a9b6e96adf41a0176a86a62e0be335ef0 | [
"Apache-2.0"
] | null | null | null | #coding:UTF-8
class B():
def __init__(self,Data):
pass | 13.4 | 28 | 0.58209 |
class B():
    """Placeholder handler class; accepts and ignores its Data argument."""
    def __init__(self,Data):
        # no initialization required
        pass
f733a7e1c1424f0decd52a9fc3f662cc5c5d53e3 | 896 | py | Python | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | class Solution(object):
def minimumAbsDifference(self, arr):
"""
:type arr: List[int]
:rtype: List[List[int]]
"""
if len(arr) < 2:
return []
sa = sorted(arr)
min_diff = sa[1] - sa[0]
res = [[sa[0], sa[1]]]
for i in range(1, len(sa) - 1):
v = sa[i + 1] - sa[i]
if v < min_diff:
res = [[sa[i], sa[i + 1]]]
min_diff = v
continue
if v == min_diff:
res.append([sa[i], sa[i + 1]])
return res
def test_minimum_abs_difference():
    """Exercise minimumAbsDifference on the three reference cases."""
    sol = Solution()
    assert sol.minimumAbsDifference([4, 2, 1, 3]) == [[1, 2], [2, 3], [3, 4]]
    assert sol.minimumAbsDifference([1, 3, 6, 10, 15]) == [[1, 3]]
    expected = [[-14, -10], [19, 23], [23, 27]]
    assert sol.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27]) == expected
class Solution(object):
    def minimumAbsDifference(self, arr):
        """Return all sorted adjacent pairs whose gap equals the minimum
        absolute difference in *arr* (empty list for fewer than 2 items)."""
        if len(arr) < 2:
            return []
        sa = sorted(arr)
        min_diff = sa[1] - sa[0]
        res = [[sa[0], sa[1]]]
        for i in range(1, len(sa) - 1):
            v = sa[i + 1] - sa[i]
            if v < min_diff:
                # strictly smaller gap: restart the result list
                res = [[sa[i], sa[i + 1]]]
                min_diff = v
                continue
            if v == min_diff:
                res.append([sa[i], sa[i + 1]])
        return res
def test_minimum_abs_difference():
    """Reference cases for Solution.minimumAbsDifference."""
    s = Solution()
    assert [[1, 2], [2, 3], [3, 4]] == s.minimumAbsDifference([4, 2, 1, 3])
    assert [[1, 3]] == s.minimumAbsDifference([1, 3, 6, 10, 15])
    assert [[-14, -10], [19, 23], [23, 27]
            ] == s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
| true | true |
f733a82ce344d6f5f6181ef651b99b56a1adb95d | 889 | py | Python | moonstone/parsers/counts/taxonomy/kraken2.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | moonstone/parsers/counts/taxonomy/kraken2.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | 84 | 2020-07-27T13:01:12.000Z | 2022-03-16T17:10:23.000Z | moonstone/parsers/counts/taxonomy/kraken2.py | motleystate/moonstone | 37c38fabf361722f7002626ef13c68c443ace4ac | [
"MIT"
] | null | null | null | from pandas import DataFrame
from moonstone.parsers.counts.taxonomy.base import BaseTaxonomyCountsParser
class SunbeamKraken2Parser(BaseTaxonomyCountsParser):
    """
    Parse output from `Kraken2 <https://ccb.jhu.edu/software/kraken2/>`_
    merge table from `Sunbeam <https://github.com/sunbeam-labs/sunbeam/>`_ pipeline.
    """
    # column holding the "; "-separated lineage string
    taxa_column = 'Consensus Lineage'
    # name assigned to the first (OTU id) column after loading
    new_otu_id_name = 'NCBI_taxonomy_ID'
    def __init__(self, *args, **kwargs):
        # skip the first row of the merge table (skiprows=1)
        super().__init__(*args, parsing_options={'skiprows': 1}, **kwargs)
    def _load_data(self) -> DataFrame:
        df = super()._load_data()
        # Rename first column to NCBI_taxonomy_ID
        df.columns = [self.new_otu_id_name] + list(df.columns[1:])
        df = self.split_taxa_fill_none(df, sep="; ", merge_genus_species=True)
        df = df.set_index(self.taxonomical_names[:self.rank_level])
        return df
| 35.56 | 84 | 0.692913 | from pandas import DataFrame
from moonstone.parsers.counts.taxonomy.base import BaseTaxonomyCountsParser
class SunbeamKraken2Parser(BaseTaxonomyCountsParser):
    """Parse a Kraken2 merge table produced by the Sunbeam pipeline."""
    # column holding the "; "-separated lineage string
    taxa_column = 'Consensus Lineage'
    # name assigned to the first (OTU id) column after loading
    new_otu_id_name = 'NCBI_taxonomy_ID'
    def __init__(self, *args, **kwargs):
        # skip the first row of the merge table (skiprows=1)
        super().__init__(*args, parsing_options={'skiprows': 1}, **kwargs)
    def _load_data(self) -> DataFrame:
        df = super()._load_data()
        # rename the first column to NCBI_taxonomy_ID
        df.columns = [self.new_otu_id_name] + list(df.columns[1:])
        df = self.split_taxa_fill_none(df, sep="; ", merge_genus_species=True)
        df = df.set_index(self.taxonomical_names[:self.rank_level])
        return df
| true | true |
f733a90584d0b7cdc7abb81d032882724c186521 | 10,849 | py | Python | server/app/services/tasks_scheduler/async_tasks/app/excels/devices_import.py | goodfree/ActorCloud | e8db470830ea6f6f208ad43c2e56a2e8976bc468 | [
"Apache-2.0"
] | 173 | 2019-06-10T07:14:49.000Z | 2022-03-31T08:42:36.000Z | server/app/services/tasks_scheduler/async_tasks/app/excels/devices_import.py | zlyz12345/ActorCloud | 9c34b371c23464981323ef9865d9913bde1fe09c | [
"Apache-2.0"
] | 27 | 2019-06-12T08:25:29.000Z | 2022-02-26T11:37:15.000Z | server/app/services/tasks_scheduler/async_tasks/app/excels/devices_import.py | zlyz12345/ActorCloud | 9c34b371c23464981323ef9865d9913bde1fe09c | [
"Apache-2.0"
] | 67 | 2019-06-10T08:40:05.000Z | 2022-03-09T03:43:56.000Z | import json
import logging
from collections import defaultdict
from datetime import datetime
from typing import Dict, AnyStr
import pandas as pd
from actor_libs.database.async_db import db
from actor_libs.tasks.backend import update_task
from actor_libs.tasks.exceptions import TaskException
from actor_libs.utils import generate_uuid
from ._utils import pg_to_excel
from ._utils import read_excel
from .multi_language import (
ImportStatus, STATUS_MESSAGE, IMPORT_RENAME_ZH, IMPORT_ERROR_RENAME
)
from .sql_statements import (
device_import_sql, dict_code_sql,
query_tenant_devices_limit_sql,
)
from .validate import validates_schema
from ..config import project_config
__all__ = ['devices_import_task']
logger = logging.getLogger(__name__)
async def devices_import_task(request_dict):
    """
    Run the whole device-import pipeline for one uploaded excel file.

    request_dict keys:
    {'taskID', 'language', 'filePath', 'tenantID', 'userIntID'}
    """
    task_id = request_dict['taskID']
    # mark the task as started
    await _update_task_progress(
        task_id, status=2, progress=10,
        import_status=ImportStatus.UPLOADED
    )
    dict_code = await get_dict_code(request_dict['language'])
    import_records = await read_devices_excels(
        request_dict, dict_code
    )
    if not import_records:
        # nothing parsed from the sheet -> fail the task
        await _update_task_progress(
            request_dict['taskID'], status=4,
            progress=15, import_status=ImportStatus.FAILED
        )
        raise TaskException(code=500, error_code='FAILED')
    correct_records, error_records = await handle_import_records(
        import_records, request_dict
    )
    correct_num, error_nums = len(correct_records), len(error_records)
    result_info = {
        'success': correct_num,
        'failed': error_nums
    }
    if correct_num > 0:
        await _import_correct_rows(correct_records, correct_num, request_dict)
    if error_records:
        # export failed rows so the user can fix and re-upload them;
        # export failure is logged but does not fail the task
        try:
            export_path = await _export_error_rows(
                error_records, dict_code, request_dict
            )
            result_info['excelPath'] = export_path
        except Exception as e:
            logger.error(f"error_records: {e}")
    await _update_task_progress(
        request_dict['taskID'], status=3,
        progress=100, import_status=ImportStatus.COMPLETED,
        result=result_info,
    )
async def get_dict_code(language: AnyStr) -> Dict:
    """Build a {code: {label: value}} translation mapping for *language*."""
    rows = await db.fetch_many(
        dict_code_sql.format(language=language)
    )
    # row layout: row[0]=code, row[1]=values, row[2]=labels
    return {row[0]: dict(zip(row[2], row[1])) for row in rows}
async def read_devices_excels(request_dict: Dict, dict_code):
    """Read the uploaded excel file into a list of record dicts."""
    try:
        # non-English sheets use translated headers -> map them back
        rename_dict = IMPORT_RENAME_ZH if request_dict['language'] != 'en' else None
        data_frame = await read_excel(
            request_dict['filePath'], rename_dict=rename_dict,
            replace_dict=dict_code
        )
        data_frame = await _handle_data_frame(data_frame)
        import_records = data_frame.to_dict('records')
        await _update_task_progress(
            request_dict['taskID'], status=2,
            progress=30, import_status=ImportStatus.READING
        )
    except Exception as e:
        # any parse failure is reported as a template error
        logger.error(f"read_devices_excels: {e}")
        await _update_task_progress(
            request_dict['taskID'], status=4,
            progress=35, import_status=ImportStatus.TEMPLATE_ERROR
        )
        raise TaskException(code=500, error_code='TEMPLATE_ERROR')
    return import_records
async def _handle_data_frame(data_frame):
cover_float = ['longitude', 'latitude']
data_frame[cover_float] = data_frame[cover_float].astype(float)
# nan -> None
data_frame = data_frame.where((pd.notnull(data_frame)), None)
return data_frame
async def handle_import_records(import_records, request_dict):
    """Validate imported rows and split them into correct/error records."""
    # use schema to validate imported data
    correct_records = []
    correct_record_append = correct_records.append
    error_records = []
    error_record_append = error_records.append
    try:
        validated_result = await validates_schema(
            import_records, request_dict
        )
        await _update_task_progress(
            request_dict['taskID'], status=2, progress=50,
            import_status=ImportStatus.VALIDATING
        )
    except Exception as e:
        logger.error(f"validates_schema: {e}")
        await _update_task_progress(
            request_dict['taskID'], status=4, progress=55,
            import_status=ImportStatus.ABNORMAL
        )
        raise TaskException(code=500, error_code='ABNORMAL')
    rows_error_msg, devices_attr_info = validated_result
    products_info = devices_attr_info['products_info']
    gateways_info = devices_attr_info['gateways_info']
    for row, record in enumerate(import_records):
        if rows_error_msg.get(row):
            # attach the validation error message to the failed row
            record.update(rows_error_msg[row])
            error_record_append(record)
        else:
            # resolve product/gateway display names to their ids
            product_name = record['product']
            gateway_name = record['gateway']
            if products_info.get(product_name):
                record['productID'] = products_info[product_name]['productID']
                record['cloudProtocol'] = products_info[product_name]['cloudProtocol']
            if gateways_info.get(gateway_name):
                record['gateway'] = gateways_info[gateway_name]['id']
            record = await set_device_default_value(record)
            correct_record_append(record)
    return correct_records, error_records
async def _import_correct_rows(correct_records, correct_num, request_dict):
    """Insert validated rows after checking the tenant's device limit."""
    is_exceed_limit = await _check_devices_limit(correct_num, request_dict)
    if is_exceed_limit:
        await _update_task_progress(
            request_dict['taskID'], status=4, progress=70,
            import_status=ImportStatus.LIMITED
        )
        raise TaskException(code=500, error_code='LIMITED')
    try:
        await _insert_correct_rows(correct_records, request_dict)
        await _update_task_progress(
            request_dict['taskID'], status=2,
            progress=80, import_status=ImportStatus.IMPORTING
        )
    except Exception as e:
        # the insert runs in one transaction, so a failure imports nothing
        logger.error(f"_import_correct_rows: {e}")
        await _update_task_progress(
            request_dict['taskID'], status=4,
            progress=85, import_status=ImportStatus.FAILED
        )
        raise TaskException(code=500, error_code='FAILED')
async def _check_devices_limit(correct_num, request_dict) -> bool:
    """
    Check if the device limit is exceeded
    :return True if exceed limit otherwise False
    """
    query_sql = query_tenant_devices_limit_sql.format(
        tenantID=request_dict['tenantID']
    )
    row = await db.fetch_row(query_sql)
    if not row:
        # no tenant record found -> treat as not exceeding the limit
        return False
    device_sum, devices_limit = row
    return device_sum + correct_num > devices_limit
async def _insert_correct_rows(correct_records, request_dict):
    """Insert validated device rows inside a single DB transaction."""
    default_columns = [
        "createAt", "deviceName", "deviceType", "productID",
        "authType", "upLinkNetwork", "deviceID", "deviceUsername", "token",
        "location", "latitude", "longitude",
        "manufacturer", "serialNumber", "softVersion", "hardwareVersion",
        "deviceConsoleIP", "deviceConsoleUsername", "deviceConsolePort",
        "mac", "upLinkSystem", "gateway", "parentDevice",
        "loraData", "lwm2mData", "userIntID", "tenantID"
    ]
    create_at = datetime.now()
    async with db.pool.acquire() as conn:
        async with conn.transaction():
            for record in correct_records:
                record['createAt'] = create_at
                record['userIntID'] = request_dict['userIntID']
                record['tenantID'] = request_dict['tenantID']
                # fill any column missing from the sheet with None
                miss_columns = set(default_columns) - set(record.keys())
                record.update({c: None for c in miss_columns})
                # NOTE(review): SQL is built with str.format -- values are not
                # parameterized; presumably safe only because rows passed
                # schema validation upstream. Consider parameterized queries.
                execute_sql = device_import_sql.format(**record)
                # str.format renders missing values as the literal strings
                # 'None'/'NULL'; rewrite them to SQL NULL
                execute_sql = execute_sql.replace("'None'", "NULL")
                execute_sql = execute_sql.replace("'NULL'", "NULL")
                await conn.execute(execute_sql)
async def _export_error_rows(errors_rows, dict_code, request_dict):
    """ Export processing failure data to excel """
    column_sort = list(IMPORT_ERROR_RENAME.keys())
    # invert dict_code so stored values map back to display labels
    error_dict_code = defaultdict(dict)
    for code, code_value in dict_code.items():
        for code_k, code_v in code_value.items():
            error_dict_code[code][code_v] = code_k
    data_frame = pd.DataFrame(errors_rows)
    data_frame = data_frame[column_sort].replace(error_dict_code)
    if request_dict['language'] != 'en':
        # translate column headers back for non-English users
        data_frame = data_frame.rename(columns=IMPORT_ERROR_RENAME)
    state_dict = await pg_to_excel(
        export_path=project_config.get('EXPORT_EXCEL_PATH'),
        table_name='ErrorImportDevicesW5',
        export_data=data_frame,
        tenant_uid=request_dict['tenantID'])
    export_path = state_dict.get('excelPath')
    return export_path
async def set_device_default_value(device_info):
    """
    Fill in defaults for a validated device record before insertion.

    Normalizes gateway/upLinkSystem consistency, generates missing
    identifiers, and builds the lwm2m payload for lwm2m devices.

    :param device_info: mutable dict for one device row (mutated in place)
    :return: the same dict, with defaults applied
    """
    # gateway is only meaningful when upLinkSystem is 3 (gateway uplink)
    if device_info.get('upLinkSystem') != 3:
        device_info['gateway'] = None
    if device_info.get('upLinkSystem') == 3 and not device_info.get('gateway'):
        # gateway uplink without a gateway falls back to upLinkSystem 1
        device_info['upLinkSystem'] = 1
        device_info['gateway'] = None
    if device_info.get('cloudProtocol') == 3:
        # lwm2m protocol: deviceID doubles as the 15-digit IMEI
        if device_info.get('deviceID'):
            imei = device_info['deviceID']
        else:
            imei = generate_uuid(size=15)
            device_info['deviceID'] = imei
        lwm2m_data = {
            'autoSub': 0,
            'IMEI': imei,
            'IMSI': imei
        }
        device_info['lwm2mData'] = json.dumps(lwm2m_data)
    if not device_info.get('deviceID'):
        device_info['deviceID'] = generate_uuid()
    if not device_info.get('deviceUsername'):
        device_info['deviceUsername'] = generate_uuid()
    if not device_info.get('token'):
        # default token mirrors the device username
        # (fix: the original repeated this identical check twice; the second
        # copy was dead code and has been removed)
        device_info['token'] = device_info['deviceUsername']
    device_info['upLinkNetwork'] = 1
    device_info['deviceType'] = 1  # end_devices
    return device_info
async def _update_task_progress(task_id,
                                *,
                                status=None,
                                progress=None,
                                import_status=None,
                                result=None):
    """Persist task status/progress plus a localized status message."""
    if not result:
        result = {}
    # NOTE(review): import_status looks effectively required -- a None value
    # would raise AttributeError on ``.value`` below; confirm all callers
    # always pass it.
    result['message'] = STATUS_MESSAGE.get(import_status)
    result['code'] = import_status.value
    update_dict = {
        'status': status,
        'progress': progress,
        'result': result,
        'taskID': task_id
    }
    await update_task(task_id, update_dict)
    return result
| 36.284281 | 86 | 0.657019 | import json
import logging
from collections import defaultdict
from datetime import datetime
from typing import Dict, AnyStr
import pandas as pd
from actor_libs.database.async_db import db
from actor_libs.tasks.backend import update_task
from actor_libs.tasks.exceptions import TaskException
from actor_libs.utils import generate_uuid
from ._utils import pg_to_excel
from ._utils import read_excel
from .multi_language import (
ImportStatus, STATUS_MESSAGE, IMPORT_RENAME_ZH, IMPORT_ERROR_RENAME
)
from .sql_statements import (
device_import_sql, dict_code_sql,
query_tenant_devices_limit_sql,
)
from .validate import validates_schema
from ..config import project_config
__all__ = ['devices_import_task']
logger = logging.getLogger(__name__)
async def devices_import_task(request_dict):
task_id = request_dict['taskID']
await _update_task_progress(
task_id, status=2, progress=10,
import_status=ImportStatus.UPLOADED
)
dict_code = await get_dict_code(request_dict['language'])
import_records = await read_devices_excels(
request_dict, dict_code
)
if not import_records:
await _update_task_progress(
request_dict['taskID'], status=4,
progress=15, import_status=ImportStatus.FAILED
)
raise TaskException(code=500, error_code='FAILED')
correct_records, error_records = await handle_import_records(
import_records, request_dict
)
correct_num, error_nums = len(correct_records), len(error_records)
result_info = {
'success': correct_num,
'failed': error_nums
}
if correct_num > 0:
await _import_correct_rows(correct_records, correct_num, request_dict)
if error_records:
try:
export_path = await _export_error_rows(
error_records, dict_code, request_dict
)
result_info['excelPath'] = export_path
except Exception as e:
logger.error(f"error_records: {e}")
await _update_task_progress(
request_dict['taskID'], status=3,
progress=100, import_status=ImportStatus.COMPLETED,
result=result_info,
)
async def get_dict_code(language: AnyStr) -> Dict:
    """Build a {code: {label: value}} translation mapping for *language*."""
    dict_code = {}
    query_dict_code = await db.fetch_many(
        dict_code_sql.format(language=language)
    )
    for item in query_dict_code:
        # item layout: (code, values, labels) -> {code: {label: value}, ...}
        dict_code[item[0]] = dict(zip(item[2], item[1]))
    return dict_code
async def read_devices_excels(request_dict: Dict, dict_code):
try:
rename_dict = IMPORT_RENAME_ZH if request_dict['language'] != 'en' else None
data_frame = await read_excel(
request_dict['filePath'], rename_dict=rename_dict,
replace_dict=dict_code
)
data_frame = await _handle_data_frame(data_frame)
import_records = data_frame.to_dict('records')
await _update_task_progress(
request_dict['taskID'], status=2,
progress=30, import_status=ImportStatus.READING
)
except Exception as e:
logger.error(f"read_devices_excels: {e}")
await _update_task_progress(
request_dict['taskID'], status=4,
progress=35, import_status=ImportStatus.TEMPLATE_ERROR
)
raise TaskException(code=500, error_code='TEMPLATE_ERROR')
return import_records
async def _handle_data_frame(data_frame):
    """Cast coordinate columns to float and replace NaN cells with None."""
    cover_float = ['longitude', 'latitude']
    data_frame[cover_float] = data_frame[cover_float].astype(float)
    # NaN -> None for downstream SQL insertion
    data_frame = data_frame.where((pd.notnull(data_frame)), None)
    return data_frame
async def handle_import_records(import_records, request_dict):
    """Validate import rows and split them into correct and error records.

    Runs schema validation over all rows, then enriches valid rows with
    product/gateway ids and default device values.  Progress is advanced to
    VALIDATING (50%) on success; on validation failure the task is marked
    ABNORMAL and ``TaskException`` is raised.

    :returns: tuple ``(correct_records, error_records)`` of row dicts.
    """
    correct_records = []
    correct_record_append = correct_records.append
    error_records = []
    error_record_append = error_records.append
    try:
        validated_result = await validates_schema(
            import_records, request_dict
        )
        await _update_task_progress(
            request_dict['taskID'], status=2, progress=50,
            import_status=ImportStatus.VALIDATING
        )
    except Exception as e:
        logger.error(f"validates_schema: {e}")
        await _update_task_progress(
            request_dict['taskID'], status=4, progress=55,
            import_status=ImportStatus.ABNORMAL
        )
        raise TaskException(code=500, error_code='ABNORMAL')
    # rows_error_msg maps row index -> error info for rows that failed
    # validation; devices_attr_info carries lookup tables for enrichment.
    rows_error_msg, devices_attr_info = validated_result
    products_info = devices_attr_info['products_info']
    gateways_info = devices_attr_info['gateways_info']
    for row, record in enumerate(import_records):
        if rows_error_msg.get(row):
            # Attach the validation errors to the row for error export.
            record.update(rows_error_msg[row])
            error_record_append(record)
        else:
            product_name = record['product']
            gateway_name = record['gateway']
            if products_info.get(product_name):
                record['productID'] = products_info[product_name]['productID']
                record['cloudProtocol'] = products_info[product_name]['cloudProtocol']
            if gateways_info.get(gateway_name):
                # Replace the gateway's display name with its database id.
                record['gateway'] = gateways_info[gateway_name]['id']
            record = await set_device_default_value(record)
            correct_record_append(record)
    return correct_records, error_records
async def _import_correct_rows(correct_records, correct_num, request_dict):
    """Insert validated rows after checking the tenant's device quota.

    Marks the task LIMITED (quota exceeded) or FAILED (insert error) and
    raises ``TaskException`` in those cases; otherwise advances progress to
    IMPORTING (80%).
    """
    is_exceed_limit = await _check_devices_limit(correct_num, request_dict)
    if is_exceed_limit:
        await _update_task_progress(
            request_dict['taskID'], status=4, progress=70,
            import_status=ImportStatus.LIMITED
        )
        raise TaskException(code=500, error_code='LIMITED')
    try:
        await _insert_correct_rows(correct_records, request_dict)
        await _update_task_progress(
            request_dict['taskID'], status=2,
            progress=80, import_status=ImportStatus.IMPORTING
        )
    except Exception as e:
        logger.error(f"_import_correct_rows: {e}")
        await _update_task_progress(
            request_dict['taskID'], status=4,
            progress=85, import_status=ImportStatus.FAILED
        )
        raise TaskException(code=500, error_code='FAILED')
async def _check_devices_limit(correct_num, request_dict) -> bool:
    """Return True when importing ``correct_num`` devices would exceed the
    tenant's device limit; False when within the limit or no limit row
    exists for the tenant."""
    row = await db.fetch_row(
        query_tenant_devices_limit_sql.format(tenantID=request_dict['tenantID'])
    )
    if not row:
        return False
    device_sum, devices_limit = row
    return device_sum + correct_num > devices_limit
async def _insert_correct_rows(correct_records, request_dict):
    """Insert validated device rows in a single database transaction.

    Missing columns are filled with None, then rendered to SQL via
    ``device_import_sql.format(**record)``.

    NOTE(review): building SQL with str.format and post-hoc
    "'None'" -> "NULL" replacement is vulnerable to SQL injection if any
    cell contains quotes; prefer parameterized queries (conn.execute with
    bind args) — confirm upstream validation before relying on this.
    """
    default_columns = [
        "createAt", "deviceName", "deviceType", "productID",
        "authType", "upLinkNetwork", "deviceID", "deviceUsername", "token",
        "location", "latitude", "longitude",
        "manufacturer", "serialNumber", "softVersion", "hardwareVersion",
        "deviceConsoleIP", "deviceConsoleUsername", "deviceConsolePort",
        "mac", "upLinkSystem", "gateway", "parentDevice",
        "loraData", "lwm2mData", "userIntID", "tenantID"
    ]
    # One shared timestamp so every row in the batch has the same createAt.
    create_at = datetime.now()
    async with db.pool.acquire() as conn:
        async with conn.transaction():
            for record in correct_records:
                record['createAt'] = create_at
                record['userIntID'] = request_dict['userIntID']
                record['tenantID'] = request_dict['tenantID']
                # Fill any column absent from the row with None so that
                # the format() template always has every placeholder.
                miss_columns = set(default_columns) - set(record.keys())
                record.update({c: None for c in miss_columns})
                execute_sql = device_import_sql.format(**record)
                # Translate Python None renderings into SQL NULLs.
                execute_sql = execute_sql.replace("'None'", "NULL")
                execute_sql = execute_sql.replace("'NULL'", "NULL")
                await conn.execute(execute_sql)
async def _export_error_rows(errors_rows, dict_code, request_dict):
    """Write failed import rows to an Excel file and return its path.

    Inverts the dict-code mapping so coded values are shown as their
    original labels, restores localized column headers for non-English
    users, and delegates file generation to ``pg_to_excel``.
    """
    column_sort = list(IMPORT_ERROR_RENAME.keys())
    # Invert {group: {label: value}} into {group: {value: label}} so the
    # exported sheet shows human-readable labels again.
    error_dict_code = defaultdict(dict)
    for code, code_value in dict_code.items():
        for code_k, code_v in code_value.items():
            error_dict_code[code][code_v] = code_k
    data_frame = pd.DataFrame(errors_rows)
    data_frame = data_frame[column_sort].replace(error_dict_code)
    if request_dict['language'] != 'en':
        data_frame = data_frame.rename(columns=IMPORT_ERROR_RENAME)
    state_dict = await pg_to_excel(
        export_path=project_config.get('EXPORT_EXCEL_PATH'),
        table_name='ErrorImportDevicesW5',
        export_data=data_frame,
        tenant_uid=request_dict['tenantID'])
    export_path = state_dict.get('excelPath')
    return export_path
async def set_device_default_value(device_info):
    """Fill in default values for a validated device-import row, in place.

    Normalizes the gateway/uplink combination, generates LwM2M data for
    cloudProtocol == 3, and defaults deviceID, deviceUsername, token,
    upLinkNetwork and deviceType.

    :param device_info: mutable dict for one import row.
    :returns: the same dict, mutated.
    """
    # A gateway is only meaningful when the uplink system is a gateway (3).
    if device_info.get('upLinkSystem') != 3:
        device_info['gateway'] = None
    # Gateway uplink without an actual gateway falls back to uplink 1.
    if device_info.get('upLinkSystem') == 3 and not device_info.get('gateway'):
        device_info['upLinkSystem'] = 1
        device_info['gateway'] = None
    # LwM2M devices (cloudProtocol == 3) use the device id as IMEI/IMSI.
    if device_info.get('cloudProtocol') == 3:
        if device_info.get('deviceID'):
            imei = device_info['deviceID']
        else:
            imei = generate_uuid(size=15)
            device_info['deviceID'] = imei
        lwm2m_data = {
            'autoSub': 0,
            'IMEI': imei,
            'IMSI': imei
        }
        device_info['lwm2mData'] = json.dumps(lwm2m_data)
    if not device_info.get('deviceID'):
        device_info['deviceID'] = generate_uuid()
    if not device_info.get('deviceUsername'):
        device_info['deviceUsername'] = generate_uuid()
    # Token defaults to the device username.  (The original repeated this
    # block twice; the second copy was a no-op and has been removed.)
    if not device_info.get('token'):
        device_info['token'] = device_info['deviceUsername']
    device_info['upLinkNetwork'] = 1
    device_info['deviceType'] = 1
    return device_info
async def _update_task_progress(task_id,
                                *,
                                status=None,
                                progress=None,
                                import_status=None,
                                result=None):
    """Persist task progress/status and return the result payload.

    :param task_id: id of the background task being updated.
    :param status: coarse task state code (2=running, 3=done, 4=failed as
        used by callers in this module).
    :param progress: percentage complete.
    :param import_status: ImportStatus enum member; supplies the
        user-facing message and code.
    :param result: optional dict to extend; a new dict is created if falsy.
    """
    if not result:
        result = {}
    # Bug fix: import_status defaults to None, in which case the original
    # code crashed on ``import_status.value``; only attach message/code
    # when a status was actually provided.
    if import_status is not None:
        result['message'] = STATUS_MESSAGE.get(import_status)
        result['code'] = import_status.value
    update_dict = {
        'status': status,
        'progress': progress,
        'result': result,
        'taskID': task_id
    }
    await update_task(task_id, update_dict)
    return result
| true | true |
f733a91e095b06f619e764c15a427c4e402a3796 | 860 | py | Python | suzieq/engines/pandas/__init__.py | LucaNicosia/suzieq | c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd | [
"Apache-2.0"
] | null | null | null | suzieq/engines/pandas/__init__.py | LucaNicosia/suzieq | c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd | [
"Apache-2.0"
] | null | null | null | suzieq/engines/pandas/__init__.py | LucaNicosia/suzieq | c281807ea2c4f44a9d6cd6c80fd5b71277b3cdcd | [
"Apache-2.0"
] | null | null | null | import os
from importlib.util import find_spec
from importlib import import_module
import inspect
def get_engine_object(table, baseobj):
    '''Return the appropriate class object to operate on the specified table

    Scans every non-underscore module in suzieq.engines.pandas for a class
    (other than the SqPandasEngine base) whose table_name() matches
    ``table``, and returns an instance constructed with ``baseobj``.
    Returns None when no module defines a matching engine class.
    '''
    spec = find_spec('suzieq.engines.pandas')
    # NOTE(review): relies on spec.loader exposing contents()/path; these
    # loader APIs are deprecated in newer importlib — confirm the range of
    # Python versions this must support.
    for file in spec.loader.contents():
        if (os.path.isfile(f'{os.path.dirname(spec.loader.path)}/{file}') and
                not file.startswith('_')):
            modname = file.split('.')[0]
            mod = import_module(f'suzieq.engines.pandas.{modname}')
            for mbr in inspect.getmembers(mod):
                if inspect.isclass(mbr[1]) and mbr[0] != 'SqPandasEngine':
                    # table_name is expected to be a callable attribute on
                    # engine classes; absent attribute means "not an engine".
                    fn = getattr(mbr[1], 'table_name', '')
                    if fn and fn() == table:
                        return mbr[1](baseobj)
    return None
__all__ = ['get_engine_object']
| 34.4 | 79 | 0.603488 | import os
from importlib.util import find_spec
from importlib import import_module
import inspect
def get_engine_object(table, baseobj):
spec = find_spec('suzieq.engines.pandas')
for file in spec.loader.contents():
if (os.path.isfile(f'{os.path.dirname(spec.loader.path)}/{file}') and
not file.startswith('_')):
modname = file.split('.')[0]
mod = import_module(f'suzieq.engines.pandas.{modname}')
for mbr in inspect.getmembers(mod):
if inspect.isclass(mbr[1]) and mbr[0] != 'SqPandasEngine':
fn = getattr(mbr[1], 'table_name', '')
if fn and fn() == table:
return mbr[1](baseobj)
return None
__all__ = ['get_engine_object']
| true | true |
f733aa021ea1d0b422473d9514c4cc7fd6b246d6 | 1,306 | py | Python | stats/defense.py | micka59200/Python-Baseball | dda463b1ba49e70dab676d1d3e57edc8238d0df6 | [
"MIT"
] | null | null | null | stats/defense.py | micka59200/Python-Baseball | dda463b1ba49e70dab676d1d3e57edc8238d0df6 | [
"MIT"
] | null | null | null | stats/defense.py | micka59200/Python-Baseball | dda463b1ba49e70dab676d1d3e57edc8238d0df6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from frames import games, info, events
plays = games.query("type == 'play' & event != 'NP'")
plays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']
pa = plays.loc[plays['player'].shift() != plays['player'], ['year', 'game_id', 'inning', 'team', 'player']]
pa = pa.groupby(['year', 'game_id', 'team']).size().reset_index(name='PA')
events = events.set_index(['year', 'game_id', 'team', 'event_type'])
events = events.unstack().fillna(0).reset_index()
events.columns = events.columns.droplevel()
events.columns = ['year', 'game_id', 'team', 'BB', 'E', 'H', 'HBP', 'HR', 'ROE', 'SO']
events = events.rename_axis(None, axis='columns')
events_plus_pa = pd.merge(events, pa, how='outer', left_on=['year', 'game_id', 'team'], right_on=['year', 'game_id', 'team'])
defense = pd.merge(events_plus_pa, info)
defense.loc[:, 'DER'] = 1 - ((defense['H'] + defense['ROE']) / (defense['PA'] - defense['BB'] -defense['SO'] - defense['HBP'] - defense['HR']))
defense.loc[:, 'year'] = pd.to_numeric(defense['year'])
der = defense.loc[defense['year'] >= 1978, ['year', 'defense', 'DER']]
der = der.pivot(index='year', columns='defense', values='DER')
der.plot(x_compat=True, xticks=range(1978, 2018, 4), rot=45)
plt.show() | 52.24 | 143 | 0.641654 | import pandas as pd
import matplotlib.pyplot as plt
from frames import games, info, events
plays = games.query("type == 'play' & event != 'NP'")
plays.columns = ['type', 'inning', 'team', 'player', 'count', 'pitches', 'event', 'game_id', 'year']
pa = plays.loc[plays['player'].shift() != plays['player'], ['year', 'game_id', 'inning', 'team', 'player']]
pa = pa.groupby(['year', 'game_id', 'team']).size().reset_index(name='PA')
events = events.set_index(['year', 'game_id', 'team', 'event_type'])
events = events.unstack().fillna(0).reset_index()
events.columns = events.columns.droplevel()
events.columns = ['year', 'game_id', 'team', 'BB', 'E', 'H', 'HBP', 'HR', 'ROE', 'SO']
events = events.rename_axis(None, axis='columns')
events_plus_pa = pd.merge(events, pa, how='outer', left_on=['year', 'game_id', 'team'], right_on=['year', 'game_id', 'team'])
defense = pd.merge(events_plus_pa, info)
defense.loc[:, 'DER'] = 1 - ((defense['H'] + defense['ROE']) / (defense['PA'] - defense['BB'] -defense['SO'] - defense['HBP'] - defense['HR']))
defense.loc[:, 'year'] = pd.to_numeric(defense['year'])
der = defense.loc[defense['year'] >= 1978, ['year', 'defense', 'DER']]
der = der.pivot(index='year', columns='defense', values='DER')
der.plot(x_compat=True, xticks=range(1978, 2018, 4), rot=45)
plt.show() | true | true |
f733aa2c6257379ee86445b98495e3b69e2b1b43 | 964 | py | Python | ciscodnacnautobot/navigation.py | joakimnyden/ciscodnacnautobot | 1c95f8b9205c389505afc85579e1c61d78b333a5 | [
"BSD-Source-Code"
] | 2 | 2021-04-15T07:26:12.000Z | 2022-01-24T09:38:29.000Z | ciscodnacnautobot/navigation.py | joakimnyden/ciscodnacnautobot | 1c95f8b9205c389505afc85579e1c61d78b333a5 | [
"BSD-Source-Code"
] | null | null | null | ciscodnacnautobot/navigation.py | joakimnyden/ciscodnacnautobot | 1c95f8b9205c389505afc85579e1c61d78b333a5 | [
"BSD-Source-Code"
] | null | null | null | #from extras.plugins import PluginMenuButton, PluginMenuItem
from nautobot.extras.plugins import PluginMenuButton, PluginMenuItem
#from utilities.choices import ButtonColorChoices
from nautobot.utilities.choices import ButtonColorChoices
# Nautobot plugin navigation: one "Status" menu entry with two action
# buttons (full sync and settings), all restricted to admins.
menu_items = (
    PluginMenuItem(
        link="plugins:ciscodnacnautobot:status",
        link_text="Status",
        permissions=["admin"],
        buttons=(
            # NOTE(review): this button links to sync_full but is titled
            # "Settings" with an unrelated icon — likely a copy/paste slip;
            # confirm intended title (e.g. "Full Sync").
            PluginMenuButton(
                link="plugins:ciscodnacnautobot:sync_full",
                title="Settings",
                icon_class="mdi mdi-all-inclusive",
                color=ButtonColorChoices.BLUE,
                permissions=["admin"],
            ),
            PluginMenuButton(
                link="plugins:ciscodnacnautobot:settings",
                title="Settings",
                icon_class="mdi mdi-cog",
                color=ButtonColorChoices.BLUE,
                permissions=["admin"],
            ),
        ),
    ),
)
| 32.133333 | 68 | 0.591286 |
from nautobot.extras.plugins import PluginMenuButton, PluginMenuItem
from nautobot.utilities.choices import ButtonColorChoices
menu_items = (
PluginMenuItem(
link="plugins:ciscodnacnautobot:status",
link_text="Status",
permissions=["admin"],
buttons=(
PluginMenuButton(
link="plugins:ciscodnacnautobot:sync_full",
title="Settings",
icon_class="mdi mdi-all-inclusive",
color=ButtonColorChoices.BLUE,
permissions=["admin"],
),
PluginMenuButton(
link="plugins:ciscodnacnautobot:settings",
title="Settings",
icon_class="mdi mdi-cog",
color=ButtonColorChoices.BLUE,
permissions=["admin"],
),
),
),
)
| true | true |
f733aad2a6bc5212c2f7db3edbf36799214f09e5 | 1,004 | py | Python | Reversing/HotelDoorPuzzle/solve.py | HackUCF/SunshineCTF-2020-Public | 2a57c425784a9940a4a817489b41d630d24e3cf7 | [
"MIT"
] | 7 | 2020-11-12T13:26:44.000Z | 2020-11-14T05:56:32.000Z | Reversing/HotelDoorPuzzle/solve.py | HackUCF/SunshineCTF-2020-Public | 2a57c425784a9940a4a817489b41d630d24e3cf7 | [
"MIT"
] | null | null | null | Reversing/HotelDoorPuzzle/solve.py | HackUCF/SunshineCTF-2020-Public | 2a57c425784a9940a4a817489b41d630d24e3cf7 | [
"MIT"
] | 1 | 2020-12-08T17:04:46.000Z | 2020-12-08T17:04:46.000Z | # Angr script written by other people
import angr
import claripy
# The flag is 29 printable characters, read from stdin.
FLAG_LEN = 29
STDIN_FD = 0
# base_addr = 0x100000 # To match addresses to Ghidra
base_addr = 0
proj = angr.Project("./attachments/hotel_key_puzzle", main_opts={'base_addr': base_addr})
# One 8-bit symbolic variable per flag character.
flag_chars = [claripy.BVS('sun{%d}' % i, 8) for i in range(FLAG_LEN)]
flag = claripy.Concat( *flag_chars + [claripy.BVV(b'\n')]) # Add \n for scanf() to accept the input
state = proj.factory.full_init_state(
        args=['./attachments/hotel_key_puzzle'],
        add_options=angr.options.unicorn,
        stdin=flag,
)
# Add constraints that all characters are printable
for k in flag_chars:
    state.solver.add(k >= ord('!'))
    state.solver.add(k <= ord('~'))
simgr = proj.factory.simulation_manager(state)
# Binary-specific addresses of the success/failure branches (offsets from
# base_addr; found via static analysis of the target).
find_addr  = 0x22ba # SUCCESS
avoid_addr = 0x22c8 # FAILURE
simgr.explore(find=find_addr, avoid=avoid_addr)
# Print every stdin that reaches the success address.
if (len(simgr.found) > 0):
    for found in simgr.found:
        print(found.posix.dumps(STDIN_FD).decode('utf-8').strip())
| 28.685714 | 99 | 0.699203 |
import angr
import claripy
FLAG_LEN = 29
STDIN_FD = 0
oject("./attachments/hotel_key_puzzle", main_opts={'base_addr': base_addr})
flag_chars = [claripy.BVS('sun{%d}' % i, 8) for i in range(FLAG_LEN)]
flag = claripy.Concat( *flag_chars + [claripy.BVV(b'\n')])
state = proj.factory.full_init_state(
args=['./attachments/hotel_key_puzzle'],
add_options=angr.options.unicorn,
stdin=flag,
)
for k in flag_chars:
state.solver.add(k >= ord('!'))
state.solver.add(k <= ord('~'))
simgr = proj.factory.simulation_manager(state)
find_addr = 0x22ba
avoid_addr = 0x22c8
simgr.explore(find=find_addr, avoid=avoid_addr)
if (len(simgr.found) > 0):
for found in simgr.found:
print(found.posix.dumps(STDIN_FD).decode('utf-8').strip())
| true | true |
f733abcf753b5409efd7aae6465fe22be626b10b | 3,276 | py | Python | app/airtable/base_school_db/typeform_start_a_school.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_school_db/typeform_start_a_school.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_school_db/typeform_start_a_school.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | from datetime import datetime
from typing import Optional
from pydantic import BaseModel, Field
from app.airtable.response import AirtableResponse
class CreateAirtableSSJTypeformStartASchool(BaseModel):
    """Payload for creating a "Start a School" Typeform response record in
    Airtable.  Field aliases mirror the Airtable column names exactly."""
    # Contact identity
    first_name: str = Field(alias="First Name")
    last_name: str = Field(alias="Last Name")
    email: str = Field(alias="Email")
    # Montessori certification details
    is_montessori_certified: bool = Field(alias="Is Montessori Certified", default=False)
    is_seeking_montessori_certification: bool = Field(alias="Is Seeking Montessori Certification", default=False)
    montessori_certification_certifier: Optional[str] = Field(alias="Montessori Certification Certifier")
    montessori_certification_year: Optional[int] = Field(alias="Montessori Certification Year")
    montessori_certification_levels: Optional[str] = Field(alias="Montessori Certification Levels")
    # Proposed school and contact locations
    school_location_city: str = Field(alias="School Location: City")
    school_location_state: str = Field(alias="School Location: State")
    school_location_country: Optional[str] = Field(alias="School Location: Country")
    school_location_community: Optional[str] = Field(alias="School Location: Community")
    contact_location_city: str = Field(alias="Contact Location: City")
    contact_location_state: str = Field(alias="Contact Location: State")
    contact_location_country: Optional[str] = Field(alias="Contact Location: Country")
    has_interest_in_joining_another_school: bool = Field(alias="Has Interest in Joining Another School", default=False)
    is_willing_to_move: bool = Field(alias="Is Willing to Move", default=False)
    age_classrooms_interested_in_offering: Optional[str] = Field(alias="Age Classrooms Interested In Offering")
    # Self-reported socio-economic information
    socio_economic_race_and_ethnicity: Optional[str] = Field(alias="Socio-Economic: Race & Ethnicity")
    socio_economic_race_and_ethnicity_other: Optional[str] = Field(alias="Socio-Economic: Race & Ethnicity Other")
    socio_economic_lgbtqia_identifying: Optional[str] = Field(alias="Socio-Economic: LGBTQIA Identifying")
    socio_economic_pronouns: Optional[str] = Field(alias="Socio-Economic: Pronouns")
    socio_economic_pronouns_other: Optional[str] = Field(alias="Socio-Economic: Pronouns Other")
    socio_economic_gender: Optional[str] = Field(alias="Socio-Economic: Gender")
    socio_economic_gender_other: Optional[str] = Field(alias="Socio-Economic: Gender Other")
    socio_economic_household_income: Optional[str] = Field(alias="Socio-Economic: Household Income")
    socio_economic_primary_language: Optional[str] = Field(alias="Socio-Economic: Primary Language")
    # Free-text answers and submission metadata
    message: str = Field(alias="Message")
    equity_reflection: Optional[str] = Field(alias="Equity Reflection")
    receive_communications: bool = Field(alias="Receive Communications", default=False)
    entry_date: datetime = Field(alias="Entry Date")

    class Config:
        # Accept python field names as well as the Airtable aliases.
        allow_population_by_field_name = True
class AirtableSSJTypeformStartASchoolFields(CreateAirtableSSJTypeformStartASchool):
    """Create-payload fields plus the server-assigned identifiers returned
    when reading a record back from Airtable."""
    response_id: str = Field(alias="Response ID")
    created_at: datetime = Field(alias="Created At")

    class Config:
        # Accept python field names as well as the Airtable aliases.
        allow_population_by_field_name = True
class AirtableSSJTypeformStartASchoolResponse(AirtableResponse):
    """Airtable API response wrapper with typed ``fields`` for this table."""
    fields: AirtableSSJTypeformStartASchoolFields
| 58.5 | 119 | 0.778999 | from datetime import datetime
from typing import Optional
from pydantic import BaseModel, Field
from app.airtable.response import AirtableResponse
class CreateAirtableSSJTypeformStartASchool(BaseModel):
first_name: str = Field(alias="First Name")
last_name: str = Field(alias="Last Name")
email: str = Field(alias="Email")
is_montessori_certified: bool = Field(alias="Is Montessori Certified", default=False)
is_seeking_montessori_certification: bool = Field(alias="Is Seeking Montessori Certification", default=False)
montessori_certification_certifier: Optional[str] = Field(alias="Montessori Certification Certifier")
montessori_certification_year: Optional[int] = Field(alias="Montessori Certification Year")
montessori_certification_levels: Optional[str] = Field(alias="Montessori Certification Levels")
school_location_city: str = Field(alias="School Location: City")
school_location_state: str = Field(alias="School Location: State")
school_location_country: Optional[str] = Field(alias="School Location: Country")
school_location_community: Optional[str] = Field(alias="School Location: Community")
contact_location_city: str = Field(alias="Contact Location: City")
contact_location_state: str = Field(alias="Contact Location: State")
contact_location_country: Optional[str] = Field(alias="Contact Location: Country")
has_interest_in_joining_another_school: bool = Field(alias="Has Interest in Joining Another School", default=False)
is_willing_to_move: bool = Field(alias="Is Willing to Move", default=False)
age_classrooms_interested_in_offering: Optional[str] = Field(alias="Age Classrooms Interested In Offering")
socio_economic_race_and_ethnicity: Optional[str] = Field(alias="Socio-Economic: Race & Ethnicity")
socio_economic_race_and_ethnicity_other: Optional[str] = Field(alias="Socio-Economic: Race & Ethnicity Other")
socio_economic_lgbtqia_identifying: Optional[str] = Field(alias="Socio-Economic: LGBTQIA Identifying")
socio_economic_pronouns: Optional[str] = Field(alias="Socio-Economic: Pronouns")
socio_economic_pronouns_other: Optional[str] = Field(alias="Socio-Economic: Pronouns Other")
socio_economic_gender: Optional[str] = Field(alias="Socio-Economic: Gender")
socio_economic_gender_other: Optional[str] = Field(alias="Socio-Economic: Gender Other")
socio_economic_household_income: Optional[str] = Field(alias="Socio-Economic: Household Income")
socio_economic_primary_language: Optional[str] = Field(alias="Socio-Economic: Primary Language")
message: str = Field(alias="Message")
equity_reflection: Optional[str] = Field(alias="Equity Reflection")
receive_communications: bool = Field(alias="Receive Communications", default=False)
entry_date: datetime = Field(alias="Entry Date")
class Config:
allow_population_by_field_name = True
class AirtableSSJTypeformStartASchoolFields(CreateAirtableSSJTypeformStartASchool):
response_id: str = Field(alias="Response ID")
created_at: datetime = Field(alias="Created At")
class Config:
allow_population_by_field_name = True
class AirtableSSJTypeformStartASchoolResponse(AirtableResponse):
fields: AirtableSSJTypeformStartASchoolFields
| true | true |
f733ac757dfd39afd2ebc244aed5f4a231c49aab | 11,619 | py | Python | corehq/motech/repeaters/repeater_generators.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | corehq/motech/repeaters/repeater_generators.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | corehq/motech/repeaters/repeater_generators.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | import json
import warnings
from collections import namedtuple
from datetime import datetime
from uuid import uuid4
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.translation import ugettext_lazy as _
from casexml.apps.case.xform import get_case_ids_from_form
from casexml.apps.case.xml import V2
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException
def _get_test_form(domain):
    """Build an unsaved demo form for the given domain, used by payload
    generators to produce test payloads without touching the database."""
    from corehq.form_processor.utils import TestFormMetadata
    from corehq.form_processor.utils import get_simple_wrapped_form
    # Random xmlns/form id so repeated test posts never collide.
    metadata = TestFormMetadata(domain=domain, xmlns=uuid4().hex, form_name='Demo Form')
    return get_simple_wrapped_form('test-form-' + uuid4().hex, metadata=metadata, save=False)
class BasePayloadGenerator(object):
    """Base class for repeater payload generators.

    Subclasses turn a payload document (form, case, user, ...) into the
    body of the outgoing HTTP request for a repeater.
    """
    # you only have to override these
    # when there's more than one format option for a given repeater
    format_name = ''
    format_label = ""
    # if you ever change format_name, add the old format_name here for backwards compatability
    deprecated_format_names = ()
    def __init__(self, repeater):
        self.repeater = repeater
    @property
    def content_type(self):
        """MIME type of the generated payload; subclasses override."""
        return 'text/xml'
    @staticmethod
    def enabled_for_domain(domain):
        """Whether this format should be offered for the given domain."""
        return True
    def get_payload(self, repeat_record, payload_doc):
        """Return the request body for payload_doc; must be overridden."""
        raise NotImplementedError()
    def get_headers(self):
        """HTTP headers to send with the payload."""
        return {'Content-Type': self.content_type}
    def get_test_payload(self, domain):
        """A throwaway payload used to test repeater connectivity."""
        return (
            "<?xml version='1.0' ?>"
            "<data id='test'>"
            "<TestString>Test post from CommCareHQ on %s</TestString>"
            "</data>" % datetime.utcnow()
        )
    def handle_success(self, response, payload_doc, repeat_record):
        """handle a successful post

        e.g. could be used to store something to the payload_doc once a
        response is received
        """
        return True
    def handle_failure(self, response, payload_doc, repeat_record):
        """handle a failed post
        """
        return True
    def handle_exception(self, exception, repeat_record):
        """handle an exception raised while posting
        """
        return True
# Registry entry: (format name, human-readable label, generator class).
FormatInfo = namedtuple('FormatInfo', 'name label generator_class')
class GeneratorCollection(object):
    """Collection of format_name to Payload Generators for a Repeater class

    args:
        repeater_class: A valid child class of Repeater class
    """

    def __init__(self, repeater_class):
        self.repeater_class = repeater_class
        self.default_format = ''
        self.format_generator_map = {}

    def add_new_format(self, generator_class, is_default=False):
        """Adds a new format->generator mapping to the collection

        args:
            generator_class: child class of .repeater_generators.BasePayloadGenerator

        kwargs:
            is_default: True if the format_name should be default format

        exceptions:
            raises DuplicateFormatException if format is added with is_default while other
            default exists
            raises DuplicateFormatException if format_name already exists in the collection
        """
        if is_default and self.default_format:
            raise DuplicateFormatException("A default format already exists for this repeater.")
        elif is_default:
            self.default_format = generator_class.format_name
        if generator_class.format_name in self.format_generator_map:
            raise DuplicateFormatException("There is already a Generator with this format name.")
        self.format_generator_map[generator_class.format_name] = FormatInfo(
            name=generator_class.format_name,
            label=generator_class.format_label,
            generator_class=generator_class
        )

    def get_default_format(self):
        """returns default format"""
        return self.default_format

    def get_default_generator(self):
        """returns generator class for the default format

        Bug fix: the original ``raise``d the generator class instead of
        returning it, so every call failed (raising a non-exception is a
        TypeError).
        """
        return self.format_generator_map[self.default_format].generator_class

    def get_all_formats(self, for_domain=None):
        """returns all the formats added to this repeater collection"""
        return [(name, format.label) for name, format in self.format_generator_map.items()
                if not for_domain or format.generator_class.enabled_for_domain(for_domain)]

    def get_generator_by_format(self, format):
        """returns generator class given a format, also matching any
        deprecated format names before re-raising KeyError"""
        try:
            return self.format_generator_map[format].generator_class
        except KeyError:
            for info in self.format_generator_map.values():
                if format in info.generator_class.deprecated_format_names:
                    return info.generator_class
            raise
class RegisterGenerator(object):
    """Decorator to register new formats and Payload generators for Repeaters

    args:
        repeater_cls: A child class of Repeater for which the new format is being added
        format_name: unique identifier for the format
        format_label: description for the format

    kwargs:
        is_default: whether the format is default to the repeater_cls
    """

    # Class-level cache: repeater class -> GeneratorCollection.
    generators = {}

    def __init__(self, repeater_cls, format_name, format_label, is_default=False):
        self.format_name = format_name
        self.format_label = format_label
        self.repeater_cls = repeater_cls
        self.label = format_label
        self.is_default = is_default

    def __call__(self, generator_class):
        # Legacy decorator path; new code declares payload_generator_classes
        # on the repeater class instead (see get_collection below).
        warnings.warn(
            "Usage of @RegisterGenerator as a decorator is deprecated. "
            "Please put your payload generator classes in a tuple on your repeater class "
            "called payload_generator_classes instead.",
            DeprecationWarning)
        generator_class.format_label = self.format_label
        generator_class.format_name = self.format_name
        self.register_generator(generator_class, self.repeater_cls, is_default=self.is_default)
        return generator_class

    @classmethod
    def register_generator(cls, generator_class, repeater_class, is_default):
        """Add generator_class to repeater_class's collection."""
        cls.get_collection(repeater_class).add_new_format(generator_class, is_default)

    @classmethod
    def get_collection(cls, repeater_class):
        """Return (building lazily) the GeneratorCollection for a repeater.

        On first access, registers every class in the repeater's
        payload_generator_classes tuple; the first entry is the default.
        """
        if repeater_class not in cls.generators:
            cls.generators[repeater_class] = GeneratorCollection(repeater_class)
            generator_classes = repeater_class.payload_generator_classes
            default_generator_class = generator_classes[0]
            for generator_class in generator_classes:
                cls.register_generator(
                    generator_class=generator_class,
                    repeater_class=repeater_class,
                    is_default=(generator_class is default_generator_class),
                )
        return cls.generators[repeater_class]

    @classmethod
    def generator_class_by_repeater_format(cls, repeater_class, format_name):
        """Return generator class given a Repeater class and format_name"""
        return cls.get_collection(repeater_class).get_generator_by_format(format_name)

    @classmethod
    def all_formats_by_repeater(cls, repeater_class, for_domain=None):
        """Return all formats for a given Repeater class"""
        return cls.get_collection(repeater_class).get_all_formats(for_domain=for_domain)

    @classmethod
    def default_format_by_repeater(cls, repeater_class):
        """Return default format_name for a Repeater class"""
        return cls.get_collection(repeater_class).get_default_format()
class FormRepeaterXMLPayloadGenerator(BasePayloadGenerator):
    """Forwards a form submission as its raw XML."""
    format_name = 'form_xml'
    format_label = _("XML")
    def get_payload(self, repeat_record, payload_doc):
        # payload_doc is the form document; forward its stored XML as-is.
        return payload_doc.get_xml()
    def get_test_payload(self, domain):
        return self.get_payload(None, _get_test_form(domain))
class CaseRepeaterXMLPayloadGenerator(BasePayloadGenerator):
    """Serializes a case as casexml (version from the repeater, default V2)."""
    format_name = 'case_xml'
    format_label = _("XML")
    def get_payload(self, repeat_record, payload_doc):
        # include_case_on_closed=True so closed cases still carry full data.
        return payload_doc.to_xml(self.repeater.version or V2, include_case_on_closed=True)
    def get_test_payload(self, domain):
        from casexml.apps.case.mock import CaseBlock
        return CaseBlock(
            case_id='test-case-%s' % uuid4().hex,
            create=True,
            case_type='test',
            case_name='test case',
        ).as_text()
class CaseRepeaterJsonPayloadGenerator(BasePayloadGenerator):
    """Serializes a case to its lite API JSON representation."""
    format_name = 'case_json'
    format_label = _('JSON')

    @property
    def content_type(self):
        return 'application/json'

    def get_payload(self, repeat_record, payload_doc):
        # DjangoJSONEncoder handles datetimes/decimals in the case doc.
        return json.dumps(payload_doc.to_api_json(lite=True), cls=DjangoJSONEncoder)

    def get_test_payload(self, domain):
        from casexml.apps.case.models import CommCareCase
        return self.get_payload(
            None,
            CommCareCase(
                domain=domain, type='case_type', name='Demo',
                user_id='user1', prop_a=True, prop_b='value'
            )
        )
class AppStructureGenerator(BasePayloadGenerator):
    """Payload is just the application id; the receiver fetches the rest."""
    deprecated_format_names = ('app_structure_xml',)
    def get_payload(self, repeat_record, payload_doc):
        # This is the id of the application, currently all we forward
        return repeat_record.payload_id
class ShortFormRepeaterJsonPayloadGenerator(BasePayloadGenerator):
    """Sends a minimal JSON summary of a form: id, received time, case ids."""
    deprecated_format_names = ('short_form_json',)

    @property
    def content_type(self):
        return 'application/json'

    def get_payload(self, repeat_record, form):
        summary = {
            'form_id': form.form_id,
            'received_on': json_format_datetime(form.received_on),
            'case_ids': list(get_case_ids_from_form(form)),
        }
        return json.dumps(summary)

    def get_test_payload(self, domain):
        return json.dumps({
            'form_id': 'test-form-' + uuid4().hex,
            'received_on': json_format_datetime(datetime.utcnow()),
            'case_ids': ['test-case-' + uuid4().hex, 'test-case-' + uuid4().hex]
        })
class FormRepeaterJsonPayloadGenerator(BasePayloadGenerator):
    """Serializes a form via the v0.4 API resource, attachments included."""
    format_name = 'form_json'
    format_label = _('JSON')
    def get_payload(self, repeat_record, form):
        from corehq.apps.api.resources.v0_4 import XFormInstanceResource
        from corehq.apps.api.util import form_to_es_form
        # Reuse the public API serializer so the repeater payload matches
        # what API consumers would see.
        res = XFormInstanceResource()
        bundle = res.build_bundle(obj=form_to_es_form(form, include_attachments=True))
        return res.serialize(None, res.full_dehydrate(bundle), 'application/json')
    @property
    def content_type(self):
        return 'application/json'
    def get_test_payload(self, domain):
        return self.get_payload(None, _get_test_form(domain))
class UserPayloadGenerator(BasePayloadGenerator):
    """Serializes a CommCare user via the v0.5 API resource to JSON."""
    @property
    def content_type(self):
        return 'application/json'
    def get_payload(self, repeat_record, user):
        from corehq.apps.api.resources.v0_5 import CommCareUserResource
        resource = CommCareUserResource(api_name='v0.5')
        bundle = resource.build_bundle(obj=user)
        # DjangoJSONEncoder handles datetimes in the dehydrated data.
        return json.dumps(resource.full_dehydrate(bundle).data, cls=DjangoJSONEncoder)
class LocationPayloadGenerator(BasePayloadGenerator):
    """Serializes a location document straight to JSON."""
    @property
    def content_type(self):
        return 'application/json'
    def get_payload(self, repeat_record, location):
        return json.dumps(location.to_json())
| 34.580357 | 97 | 0.691626 | import json
import warnings
from collections import namedtuple
from datetime import datetime
from uuid import uuid4
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.translation import ugettext_lazy as _
from casexml.apps.case.xform import get_case_ids_from_form
from casexml.apps.case.xml import V2
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.receiverwrapper.exceptions import DuplicateFormatException
def _get_test_form(domain):
from corehq.form_processor.utils import TestFormMetadata
from corehq.form_processor.utils import get_simple_wrapped_form
metadata = TestFormMetadata(domain=domain, xmlns=uuid4().hex, form_name='Demo Form')
return get_simple_wrapped_form('test-form-' + uuid4().hex, metadata=metadata, save=False)
class BasePayloadGenerator(object):
format_name = ''
format_label = ""
# if you ever change format_name, add the old format_name here for backwards compatability
deprecated_format_names = ()
def __init__(self, repeater):
self.repeater = repeater
@property
def content_type(self):
return 'text/xml'
@staticmethod
def enabled_for_domain(domain):
return True
def get_payload(self, repeat_record, payload_doc):
raise NotImplementedError()
def get_headers(self):
return {'Content-Type': self.content_type}
def get_test_payload(self, domain):
return (
"<?xml version='1.0' ?>"
"<data id='test'>"
"<TestString>Test post from CommCareHQ on %s</TestString>"
"</data>" % datetime.utcnow()
)
def handle_success(self, response, payload_doc, repeat_record):
return True
def handle_failure(self, response, payload_doc, repeat_record):
return True
def handle_exception(self, exception, repeat_record):
return True
FormatInfo = namedtuple('FormatInfo', 'name label generator_class')
class GeneratorCollection(object):
def __init__(self, repeater_class):
self.repeater_class = repeater_class
self.default_format = ''
self.format_generator_map = {}
def add_new_format(self, generator_class, is_default=False):
if is_default and self.default_format:
raise DuplicateFormatException("A default format already exists for this repeater.")
elif is_default:
self.default_format = generator_class.format_name
if generator_class.format_name in self.format_generator_map:
raise DuplicateFormatException("There is already a Generator with this format name.")
self.format_generator_map[generator_class.format_name] = FormatInfo(
name=generator_class.format_name,
label=generator_class.format_label,
generator_class=generator_class
)
def get_default_format(self):
return self.default_format
def get_default_generator(self):
raise self.format_generator_map[self.default_format].generator_class
def get_all_formats(self, for_domain=None):
return [(name, format.label) for name, format in self.format_generator_map.items()
if not for_domain or format.generator_class.enabled_for_domain(for_domain)]
def get_generator_by_format(self, format):
try:
return self.format_generator_map[format].generator_class
except KeyError:
for info in self.format_generator_map.values():
if format in info.generator_class.deprecated_format_names:
return info.generator_class
raise
class RegisterGenerator(object):
generators = {}
def __init__(self, repeater_cls, format_name, format_label, is_default=False):
self.format_name = format_name
self.format_label = format_label
self.repeater_cls = repeater_cls
self.label = format_label
self.is_default = is_default
def __call__(self, generator_class):
warnings.warn(
"Usage of @RegisterGenerator as a decorator is deprecated. "
"Please put your payload generator classes in a tuple on your repeater class "
"called payload_generator_classes instead.",
DeprecationWarning)
generator_class.format_label = self.format_label
generator_class.format_name = self.format_name
self.register_generator(generator_class, self.repeater_cls, is_default=self.is_default)
return generator_class
@classmethod
def register_generator(cls, generator_class, repeater_class, is_default):
cls.get_collection(repeater_class).add_new_format(generator_class, is_default)
@classmethod
def get_collection(cls, repeater_class):
if repeater_class not in cls.generators:
cls.generators[repeater_class] = GeneratorCollection(repeater_class)
generator_classes = repeater_class.payload_generator_classes
default_generator_class = generator_classes[0]
for generator_class in generator_classes:
cls.register_generator(
generator_class=generator_class,
repeater_class=repeater_class,
is_default=(generator_class is default_generator_class),
)
return cls.generators[repeater_class]
@classmethod
def generator_class_by_repeater_format(cls, repeater_class, format_name):
return cls.get_collection(repeater_class).get_generator_by_format(format_name)
@classmethod
def all_formats_by_repeater(cls, repeater_class, for_domain=None):
return cls.get_collection(repeater_class).get_all_formats(for_domain=for_domain)
@classmethod
def default_format_by_repeater(cls, repeater_class):
return cls.get_collection(repeater_class).get_default_format()
class FormRepeaterXMLPayloadGenerator(BasePayloadGenerator):
format_name = 'form_xml'
format_label = _("XML")
def get_payload(self, repeat_record, payload_doc):
return payload_doc.get_xml()
def get_test_payload(self, domain):
return self.get_payload(None, _get_test_form(domain))
class CaseRepeaterXMLPayloadGenerator(BasePayloadGenerator):
format_name = 'case_xml'
format_label = _("XML")
def get_payload(self, repeat_record, payload_doc):
return payload_doc.to_xml(self.repeater.version or V2, include_case_on_closed=True)
def get_test_payload(self, domain):
from casexml.apps.case.mock import CaseBlock
return CaseBlock(
case_id='test-case-%s' % uuid4().hex,
create=True,
case_type='test',
case_name='test case',
).as_text()
class CaseRepeaterJsonPayloadGenerator(BasePayloadGenerator):
format_name = 'case_json'
format_label = _('JSON')
def get_payload(self, repeat_record, payload_doc):
data = payload_doc.to_api_json(lite=True)
return json.dumps(data, cls=DjangoJSONEncoder)
@property
def content_type(self):
return 'application/json'
def get_test_payload(self, domain):
from casexml.apps.case.models import CommCareCase
return self.get_payload(
None,
CommCareCase(
domain=domain, type='case_type', name='Demo',
user_id='user1', prop_a=True, prop_b='value'
)
)
class AppStructureGenerator(BasePayloadGenerator):
deprecated_format_names = ('app_structure_xml',)
def get_payload(self, repeat_record, payload_doc):
# This is the id of the application, currently all we forward
return repeat_record.payload_id
class ShortFormRepeaterJsonPayloadGenerator(BasePayloadGenerator):
deprecated_format_names = ('short_form_json',)
def get_payload(self, repeat_record, form):
case_ids = list(get_case_ids_from_form(form))
return json.dumps({'form_id': form.form_id,
'received_on': json_format_datetime(form.received_on),
'case_ids': case_ids})
@property
def content_type(self):
return 'application/json'
def get_test_payload(self, domain):
return json.dumps({
'form_id': 'test-form-' + uuid4().hex,
'received_on': json_format_datetime(datetime.utcnow()),
'case_ids': ['test-case-' + uuid4().hex, 'test-case-' + uuid4().hex]
})
class FormRepeaterJsonPayloadGenerator(BasePayloadGenerator):
format_name = 'form_json'
format_label = _('JSON')
def get_payload(self, repeat_record, form):
from corehq.apps.api.resources.v0_4 import XFormInstanceResource
from corehq.apps.api.util import form_to_es_form
res = XFormInstanceResource()
bundle = res.build_bundle(obj=form_to_es_form(form, include_attachments=True))
return res.serialize(None, res.full_dehydrate(bundle), 'application/json')
@property
def content_type(self):
return 'application/json'
def get_test_payload(self, domain):
return self.get_payload(None, _get_test_form(domain))
class UserPayloadGenerator(BasePayloadGenerator):
@property
def content_type(self):
return 'application/json'
def get_payload(self, repeat_record, user):
from corehq.apps.api.resources.v0_5 import CommCareUserResource
resource = CommCareUserResource(api_name='v0.5')
bundle = resource.build_bundle(obj=user)
return json.dumps(resource.full_dehydrate(bundle).data, cls=DjangoJSONEncoder)
class LocationPayloadGenerator(BasePayloadGenerator):
@property
def content_type(self):
return 'application/json'
def get_payload(self, repeat_record, location):
return json.dumps(location.to_json())
| true | true |
f733addf00b5daaa12cd2878c7c980cfb081f7b0 | 450 | py | Python | sdk/python/pulumi_ucloud/unet/__init__.py | AaronFriel/pulumi-ucloud | 199278786dddf46bdd370f3f805e30b279c63ff2 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2021-08-18T04:55:38.000Z | 2021-09-08T07:59:24.000Z | sdk/python/pulumi_ucloud/unet/__init__.py | AaronFriel/pulumi-ucloud | 199278786dddf46bdd370f3f805e30b279c63ff2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-28T17:59:37.000Z | 2022-01-29T03:44:09.000Z | sdk/python/pulumi_ucloud/unet/__init__.py | AaronFriel/pulumi-ucloud | 199278786dddf46bdd370f3f805e30b279c63ff2 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-06-23T07:10:40.000Z | 2021-06-23T09:25:12.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .eip import *
from .eipassociation import *
from .get_eip import *
from .get_security_group import *
from .security_group import *
from ._inputs import *
from . import outputs
| 30 | 87 | 0.74 |
from .. import _utilities
import typing
# Export this package's modules as members:
from .eip import *
from .eipassociation import *
from .get_eip import *
from .get_security_group import *
from .security_group import *
from ._inputs import *
from . import outputs
| true | true |
f733ade0ac2b4da6d93a86e4d6ac33ca25862d9f | 4,522 | py | Python | mms/service.py | andrewfayres/mxnet-model-server | ef4edfef4cfe5234887bf834ec7b82676a36ba02 | [
"Apache-2.0"
] | 1 | 2019-01-30T02:57:31.000Z | 2019-01-30T02:57:31.000Z | mms/service.py | DrSnowbird/mxnet-model-server | a0bfd712350545dceb21c8e0b0b21dfa0c9918a7 | [
"Apache-2.0"
] | null | null | null | mms/service.py | DrSnowbird/mxnet-model-server | a0bfd712350545dceb21c8e0b0b21dfa0c9918a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
CustomService class definitions
"""
import logging
import time
from builtins import str
import mms
from mms.context import Context, RequestProcessor
from mms.metrics.metrics_store import MetricsStore
from mms.protocol.otf_message_handler import create_predict_response
PREDICTION_METRIC = 'PredictionTime'
logger = logging.getLogger(__name__)
class Service(object):
"""
Wrapper for custom entry_point
"""
def __init__(self, model_name, model_dir, manifest, entry_point, gpu, batch_size):
self._context = Context(model_name, model_dir, manifest, batch_size, gpu, mms.__version__)
self._entry_point = entry_point
@property
def context(self):
return self._context
@staticmethod
def retrieve_data_for_inference(batch):
"""
REQUEST_INPUT = {
"requestId" : "111-222-3333",
"parameters" : [ PARAMETER ]
}
PARAMETER = {
"name" : parameter name
"contentType": "http-content-types",
"value": "val1"
}
:param batch:
:return:
"""
if batch is None:
raise ValueError("Received invalid inputs")
req_to_id_map = {}
headers = dict()
input_batch = []
for batch_idx, request_batch in enumerate(batch):
req_id = request_batch.get('requestId').decode("utf-8")
parameters = request_batch['parameters']
model_in_headers = dict()
model_in = dict()
for parameter in parameters:
model_in.update({parameter["name"]: parameter["value"]})
model_in_headers.update({parameter["name"]: {"content-type": parameter["contentType"]}})
headers.update({req_id: model_in_headers})
input_batch.append(model_in)
req_to_id_map[batch_idx] = req_id
return headers, input_batch, req_to_id_map
def predict(self, batch):
"""
PREDICT COMMAND = {
"command": "predict",
"batch": [ REQUEST_INPUT ]
}
:param batch: list of request
:return:
"""
headers, input_batch, req_id_map = Service.retrieve_data_for_inference(batch)
self.context.request_ids = req_id_map
self.context.request_processor = RequestProcessor(headers)
metrics = MetricsStore(req_id_map, self.context.model_name)
self.context.metrics = metrics
start_time = time.time()
# noinspection PyBroadException
try:
ret = self._entry_point(input_batch, self.context)
except Exception: # pylint: disable=broad-except
logger.warning("Invoking custom service failed.", exc_info=True)
return create_predict_response(None, req_id_map, "Prediction failed", 503)
if not isinstance(ret, list):
logger.warning("model: %s, Invalid return type: %s.", self.context.model_name, type(ret))
return create_predict_response(None, req_id_map, "Invalid model predict output", 503)
if len(ret) != len(input_batch):
logger.warning("model: %s, number of batch response mismatched, expect: %d, got: %d.",
self.context.model_name, len(input_batch), len(ret))
return create_predict_response(None, req_id_map, "number of batch response mismatched", 503)
duration = round((time.time() - start_time) * 1000, 2)
metrics.add_time(PREDICTION_METRIC, duration)
return create_predict_response(ret, req_id_map, "Prediction success", 200, context=self.context)
def emit_metrics(metrics):
"""
Emit the metrics in the provided Dictionary
Parameters
----------
metrics: Dictionary
A dictionary of all metrics, when key is metric_name
value is a metric object
"""
if metrics:
for met in metrics:
logger.info("[METRICS]%s", str(met))
| 33.746269 | 104 | 0.644847 |
import logging
import time
from builtins import str
import mms
from mms.context import Context, RequestProcessor
from mms.metrics.metrics_store import MetricsStore
from mms.protocol.otf_message_handler import create_predict_response
PREDICTION_METRIC = 'PredictionTime'
logger = logging.getLogger(__name__)
class Service(object):
def __init__(self, model_name, model_dir, manifest, entry_point, gpu, batch_size):
self._context = Context(model_name, model_dir, manifest, batch_size, gpu, mms.__version__)
self._entry_point = entry_point
@property
def context(self):
return self._context
@staticmethod
def retrieve_data_for_inference(batch):
if batch is None:
raise ValueError("Received invalid inputs")
req_to_id_map = {}
headers = dict()
input_batch = []
for batch_idx, request_batch in enumerate(batch):
req_id = request_batch.get('requestId').decode("utf-8")
parameters = request_batch['parameters']
model_in_headers = dict()
model_in = dict()
for parameter in parameters:
model_in.update({parameter["name"]: parameter["value"]})
model_in_headers.update({parameter["name"]: {"content-type": parameter["contentType"]}})
headers.update({req_id: model_in_headers})
input_batch.append(model_in)
req_to_id_map[batch_idx] = req_id
return headers, input_batch, req_to_id_map
def predict(self, batch):
headers, input_batch, req_id_map = Service.retrieve_data_for_inference(batch)
self.context.request_ids = req_id_map
self.context.request_processor = RequestProcessor(headers)
metrics = MetricsStore(req_id_map, self.context.model_name)
self.context.metrics = metrics
start_time = time.time()
try:
ret = self._entry_point(input_batch, self.context)
except Exception:
logger.warning("Invoking custom service failed.", exc_info=True)
return create_predict_response(None, req_id_map, "Prediction failed", 503)
if not isinstance(ret, list):
logger.warning("model: %s, Invalid return type: %s.", self.context.model_name, type(ret))
return create_predict_response(None, req_id_map, "Invalid model predict output", 503)
if len(ret) != len(input_batch):
logger.warning("model: %s, number of batch response mismatched, expect: %d, got: %d.",
self.context.model_name, len(input_batch), len(ret))
return create_predict_response(None, req_id_map, "number of batch response mismatched", 503)
duration = round((time.time() - start_time) * 1000, 2)
metrics.add_time(PREDICTION_METRIC, duration)
return create_predict_response(ret, req_id_map, "Prediction success", 200, context=self.context)
def emit_metrics(metrics):
if metrics:
for met in metrics:
logger.info("[METRICS]%s", str(met))
| true | true |
f733b1ee76956d5a39af334e2f55ee1e03c4f971 | 1,089 | py | Python | colab/grr_colab/_api.py | certxlm/grr | c2a442a27f656fb18dfa3bce098847e5c5b849d7 | [
"Apache-2.0"
] | null | null | null | colab/grr_colab/_api.py | certxlm/grr | c2a442a27f656fb18dfa3bce098847e5c5b849d7 | [
"Apache-2.0"
] | null | null | null | colab/grr_colab/_api.py | certxlm/grr | c2a442a27f656fb18dfa3bce098847e5c5b849d7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""A module for lazy instantiation of the GRR's Python API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from grr_api_client import api
from grr_colab import flags
FLAGS = flags.FLAGS
_API = None # type: api.GrrApi
def get():
"""Lazily returns the GRR API object.
This method is not thread-safe. This is okay because Colab is supposed to be
scripted interactively and no threading is involved.
Returns:
A GRR API object.
"""
global _API
if _API is None:
if not FLAGS.grr_http_api_endpoint:
raise ValueError("HTTP API endpoint has not been specified.")
if not FLAGS.grr_auth_api_user:
raise ValueError("API user name has not been specified.")
if not FLAGS.grr_auth_password:
raise ValueError("API user password has not been specified.")
auth = (FLAGS.grr_auth_api_user, FLAGS.grr_auth_password)
_API = api.InitHttp(
api_endpoint=FLAGS.grr_http_api_endpoint, auth=auth)
return _API
| 25.325581 | 78 | 0.742883 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from grr_api_client import api
from grr_colab import flags
FLAGS = flags.FLAGS
_API = None
def get():
global _API
if _API is None:
if not FLAGS.grr_http_api_endpoint:
raise ValueError("HTTP API endpoint has not been specified.")
if not FLAGS.grr_auth_api_user:
raise ValueError("API user name has not been specified.")
if not FLAGS.grr_auth_password:
raise ValueError("API user password has not been specified.")
auth = (FLAGS.grr_auth_api_user, FLAGS.grr_auth_password)
_API = api.InitHttp(
api_endpoint=FLAGS.grr_http_api_endpoint, auth=auth)
return _API
| true | true |
f733b20de7ac1766743c519b7f3c51c553df48ad | 1,196 | py | Python | src/datasets/main.py | ErikKratzCth/Deep-SVDD | f77209b85f654aa68d29ab636ecb422207f437e1 | [
"MIT"
] | 3 | 2019-06-14T09:26:38.000Z | 2019-09-06T11:51:47.000Z | src/datasets/main.py | KratzErik/Deep-SVDD | f77209b85f654aa68d29ab636ecb422207f437e1 | [
"MIT"
] | 14 | 2021-02-02T21:53:37.000Z | 2022-03-11T23:39:13.000Z | src/datasets/main.py | KratzErik/Deep-SVDD | f77209b85f654aa68d29ab636ecb422207f437e1 | [
"MIT"
] | 1 | 2020-07-15T03:21:48.000Z | 2020-07-15T03:21:48.000Z | from datasets.__local__ import implemented_datasets
from datasets.mnist import MNIST_DataLoader
from datasets.cifar10 import CIFAR_10_DataLoader
from datasets.GTSRB import GTSRB_DataLoader
from datasets.bdd100k import BDD100K_DataLoader
from datasets.dreyeve import DREYEVE_DataLoader
from datasets.prosivic import PROSIVIC_DataLoader
from datasets.smile import SMILE_DataLoader
def load_dataset(learner, dataset_name, pretrain=False):
assert dataset_name in implemented_datasets
if dataset_name == "mnist":
data_loader = MNIST_DataLoader
if dataset_name == "cifar10":
data_loader = CIFAR_10_DataLoader
if dataset_name == "gtsrb":
data_loader = GTSRB_DataLoader
if dataset_name == "bdd100k":
data_loader = BDD100K_DataLoader
if dataset_name == "dreyeve":
#data_loader = DREYEVE_DataLoader
data_loader = SMILE_DataLoader
if dataset_name == "prosivic":
#data_loader = PROSIVIC_DataLoader
data_loader = SMILE_DataLoader
# load data with data loader
learner.load_data(data_loader=data_loader, pretrain=pretrain)
# check all parameters have been attributed
learner.data.check_all()
| 29.9 | 65 | 0.758361 | from datasets.__local__ import implemented_datasets
from datasets.mnist import MNIST_DataLoader
from datasets.cifar10 import CIFAR_10_DataLoader
from datasets.GTSRB import GTSRB_DataLoader
from datasets.bdd100k import BDD100K_DataLoader
from datasets.dreyeve import DREYEVE_DataLoader
from datasets.prosivic import PROSIVIC_DataLoader
from datasets.smile import SMILE_DataLoader
def load_dataset(learner, dataset_name, pretrain=False):
assert dataset_name in implemented_datasets
if dataset_name == "mnist":
data_loader = MNIST_DataLoader
if dataset_name == "cifar10":
data_loader = CIFAR_10_DataLoader
if dataset_name == "gtsrb":
data_loader = GTSRB_DataLoader
if dataset_name == "bdd100k":
data_loader = BDD100K_DataLoader
if dataset_name == "dreyeve":
data_loader = SMILE_DataLoader
if dataset_name == "prosivic":
data_loader = SMILE_DataLoader
learner.load_data(data_loader=data_loader, pretrain=pretrain)
learner.data.check_all()
| true | true |
f733b236b54a57311064af65e938c72f6a407051 | 1,837 | py | Python | connect_four/evaluation/incremental_victor/graph/graph_manager_add_solution_profile.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
] | null | null | null | connect_four/evaluation/incremental_victor/graph/graph_manager_add_solution_profile.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
] | null | null | null | connect_four/evaluation/incremental_victor/graph/graph_manager_add_solution_profile.py | rpachauri/connect4 | 6caf6965afaaff6883193ac295c6ac5b1f4e9c4a | [
"MIT"
] | null | null | null | import cProfile
import gym
import numpy as np
from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager
from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager
from connect_four.problem import ConnectFourGroupManager
env = gym.make('connect_four-v0')
env.state = np.array([
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
],
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 1, 0, 0, ],
[0, 0, 0, 0, 1, 0, 0, ],
],
])
# noinspection SpellCheckingInspection
cfgm = ConnectFourGroupManager(env_variables=env.env_variables)
vsm = VictorSolutionManager(env_variables=env.env_variables)
player, row, col = 0, 5, 0
gm = GraphManager(player=player, problem_manager=cfgm, solution_manager=vsm)
_, removed_problems = cfgm.move(player=player, row=row, col=col)
for problem in removed_problems:
gm._remove_problem(problem)
removed_solutions, added_solutions = vsm.move(player=player, row=row, col=col)
print("len(removed_solutions) = ", len(removed_solutions))
print("len(added_solutions) = ", len(added_solutions))
# print("number of useful solutions =", len(self.solution_to_solutions))
for solution in removed_solutions:
gm._remove_solution(solution)
print("number of solutions that remained =", len(gm.solution_to_solutions))
def add_solutions():
for solution in added_solutions:
gm._add_solution(solution)
print("number of solutions after adding =", len(gm.solution_to_solutions))
cProfile.run(
'add_solutions()',
sort="cumtime",
)
| 29.15873 | 109 | 0.653783 | import cProfile
import gym
import numpy as np
from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager
from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager
from connect_four.problem import ConnectFourGroupManager
env = gym.make('connect_four-v0')
env.state = np.array([
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 0, 0, 0, ],
],
[
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, 0, 0, ],
[0, 0, 1, 1, 1, 0, 0, ],
[0, 0, 0, 0, 1, 0, 0, ],
],
])
cfgm = ConnectFourGroupManager(env_variables=env.env_variables)
vsm = VictorSolutionManager(env_variables=env.env_variables)
player, row, col = 0, 5, 0
gm = GraphManager(player=player, problem_manager=cfgm, solution_manager=vsm)
_, removed_problems = cfgm.move(player=player, row=row, col=col)
for problem in removed_problems:
gm._remove_problem(problem)
removed_solutions, added_solutions = vsm.move(player=player, row=row, col=col)
print("len(removed_solutions) = ", len(removed_solutions))
print("len(added_solutions) = ", len(added_solutions))
for solution in removed_solutions:
gm._remove_solution(solution)
print("number of solutions that remained =", len(gm.solution_to_solutions))
def add_solutions():
for solution in added_solutions:
gm._add_solution(solution)
print("number of solutions after adding =", len(gm.solution_to_solutions))
cProfile.run(
'add_solutions()',
sort="cumtime",
)
| true | true |
f733b3d4e0f215224c3aa2f0fd4a080eb67cd88f | 205 | py | Python | mongodb_server_test.py | fatihdq/vans-product-on-e-commerce | 0e55cb6c7841eba8a2c95ddc03821830e97593da | [
"MIT"
] | null | null | null | mongodb_server_test.py | fatihdq/vans-product-on-e-commerce | 0e55cb6c7841eba8a2c95ddc03821830e97593da | [
"MIT"
] | null | null | null | mongodb_server_test.py | fatihdq/vans-product-on-e-commerce | 0e55cb6c7841eba8a2c95ddc03821830e97593da | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import json
from pprint import pprint
client = MongoClient('localhost:27017')
db = client.admin
serverStatusResult = db.command("serverStatus")
pprint(serverStatusResult) | 20.5 | 47 | 0.814634 | from pymongo import MongoClient
import json
from pprint import pprint
client = MongoClient('localhost:27017')
db = client.admin
serverStatusResult = db.command("serverStatus")
pprint(serverStatusResult) | true | true |
f733b435bce83c1b53aeaf765c901f14b8fb4fa4 | 2,171 | py | Python | slgnn/data_processing/jakfp_dataset.py | thomasly/slgnn | caa1e7814498da41ad025b4e62c569fe511848ff | [
"MIT"
] | 2 | 2020-08-31T00:55:31.000Z | 2020-09-01T19:59:30.000Z | slgnn/data_processing/jakfp_dataset.py | thomasly/slgnn | caa1e7814498da41ad025b4e62c569fe511848ff | [
"MIT"
] | null | null | null | slgnn/data_processing/jakfp_dataset.py | thomasly/slgnn | caa1e7814498da41ad025b4e62c569fe511848ff | [
"MIT"
] | null | null | null | import os
import pandas as pd
from chemreader.writers import GraphWriter
from chemreader.readers import Smiles
from rdkit.Chem import MolFromSmiles
from slgnn.models.gcn.utils import get_filtered_fingerprint
from tqdm import tqdm
def _is_active(value):
if value < 1000:
return 1
elif value >= 10000:
return -1
else:
return 0
def filter_(path):
""" Filter JAK dataset
"""
jak = pd.read_csv(path)
jak.dropna(subset=["Standard Relation", "Standard Value"], inplace=True)
not_eq = jak["Standard Relation"] != "'='"
lt_10um = jak["Standard Value"] < 100000
filtered = jak.drop(jak.loc[not_eq & lt_10um].index)
gt = jak["Standard Relation"] == "'>'"
eq_1um = jak["Standard Value"] >= 1000
add_back = jak.loc[gt & eq_1um]
filtered = filtered.append(add_back)
filtered["Activity"] = filtered["Standard Value"].apply(_is_active)
out_path = os.path.join(os.path.dirname(path), "filtered_" + os.path.basename(path))
filtered[["Smiles", "Activity"]].to_csv(out_path)
def write_graphs(inpath, outpath, prefix=None):
""" Convert JAK dataset to graphs
"""
smiles = list()
fps = list()
pb = tqdm()
with open(inpath, "r") as inf:
line = inf.readline()
while line:
_, sm, _ = line.strip().split(",")
if MolFromSmiles(sm) is None:
line = inf.readline()
continue
smiles.append(Smiles(sm))
fps.append(",".join(map(str, get_filtered_fingerprint(sm))))
pb.update(1)
line = inf.readline()
writer = GraphWriter(smiles)
writer.write(outpath, prefix=prefix, graph_labels=fps)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="Path to the JAK file")
args = parser.parse_args()
filter_(args.path)
inpath = os.path.join(
os.path.dirname(args.path), "filtered_" + os.path.basename(args.path)
)
pre = os.path.basename(args.path).split(".")[0] + "FP"
write_graphs(inpath, os.path.join(os.path.dirname(args.path), "graphs"), prefix=pre)
| 31.014286 | 88 | 0.628282 | import os
import pandas as pd
from chemreader.writers import GraphWriter
from chemreader.readers import Smiles
from rdkit.Chem import MolFromSmiles
from slgnn.models.gcn.utils import get_filtered_fingerprint
from tqdm import tqdm
def _is_active(value):
if value < 1000:
return 1
elif value >= 10000:
return -1
else:
return 0
def filter_(path):
jak = pd.read_csv(path)
jak.dropna(subset=["Standard Relation", "Standard Value"], inplace=True)
not_eq = jak["Standard Relation"] != "'='"
lt_10um = jak["Standard Value"] < 100000
filtered = jak.drop(jak.loc[not_eq & lt_10um].index)
gt = jak["Standard Relation"] == "'>'"
eq_1um = jak["Standard Value"] >= 1000
add_back = jak.loc[gt & eq_1um]
filtered = filtered.append(add_back)
filtered["Activity"] = filtered["Standard Value"].apply(_is_active)
out_path = os.path.join(os.path.dirname(path), "filtered_" + os.path.basename(path))
filtered[["Smiles", "Activity"]].to_csv(out_path)
def write_graphs(inpath, outpath, prefix=None):
smiles = list()
fps = list()
pb = tqdm()
with open(inpath, "r") as inf:
line = inf.readline()
while line:
_, sm, _ = line.strip().split(",")
if MolFromSmiles(sm) is None:
line = inf.readline()
continue
smiles.append(Smiles(sm))
fps.append(",".join(map(str, get_filtered_fingerprint(sm))))
pb.update(1)
line = inf.readline()
writer = GraphWriter(smiles)
writer.write(outpath, prefix=prefix, graph_labels=fps)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", help="Path to the JAK file")
args = parser.parse_args()
filter_(args.path)
inpath = os.path.join(
os.path.dirname(args.path), "filtered_" + os.path.basename(args.path)
)
pre = os.path.basename(args.path).split(".")[0] + "FP"
write_graphs(inpath, os.path.join(os.path.dirname(args.path), "graphs"), prefix=pre)
| true | true |
f733b50a33b0c1735ad7d299299f19c1dac4fcbb | 4,060 | py | Python | blackbird/test/test_configread/test_global/test_include.py | JumpeiArashi/blackbird | 1acd40c40c9626df68f252e6265b722d1a8da64b | [
"WTFPL"
] | null | null | null | blackbird/test/test_configread/test_global/test_include.py | JumpeiArashi/blackbird | 1acd40c40c9626df68f252e6265b722d1a8da64b | [
"WTFPL"
] | null | null | null | blackbird/test/test_configread/test_global/test_include.py | JumpeiArashi/blackbird | 1acd40c40c9626df68f252e6265b722d1a8da64b | [
"WTFPL"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import glob
import shutil
import nose.tools
import blackbird.utils.configread
import blackbird.utils.error
class TestConfigReaderGetGlobalIncludeAbsPath(object):
def __init__(self):
infile = (
'[global]',
'user = nobody',
'group = nobody'
)
self.test_config = blackbird.utils.configread.ConfigReader(
infile=infile
)
def test_abs_path(self):
test_value = '/etc/blackbird/conf.d/*.cfg'
nose.tools.eq_(
test_value,
self.test_config._get_global_include_abs_path(
test_value
)
)
def test_relative_path(self):
test_value = './blackbird/*.cfg'
nose.tools.ok_(
os.path.isabs(
self.test_config._get_global_include_abs_path(
test_value
)
)
)
def test_abs_dir(self):
test_value = os.path.join(
__file__, '../../etc/'
)
test_value = os.path.abspath(test_value)
test_value = self.test_config._get_global_include_abs_path(
test_value
)
nose.tools.ok_(
os.path.isabs(test_value) and
test_value.endswith('*')
)
def test_relative_dir(self):
test_value = self.test_config._get_global_include_abs_path(
'./'
)
nose.tools.ok_(
os.path.isabs(test_value) and
test_value.endswith('*')
)
class TestConfigReaderValidateGlobalInclude(object):
    """Tests for ConfigReader._validate_global_include()."""

    def __init__(self):
        self.test_config = blackbird.utils.configread.ConfigReader(
            infile=(
                '[global]',
                'user = nobody',
                'group = nobody'
            )
        )
        # Scratch directory some tests create; removed again in teardown().
        self.tmp_dir = os.path.abspath(
            os.path.join(__file__, '../../tmp')
        )

    def teardown(self):
        # Best-effort cleanup of the scratch directory.
        if os.path.exists(self.tmp_dir):
            shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def test_abs_path(self):
        # A readable, existing absolute glob must validate successfully.
        target = os.path.join(__file__, '../../etc/*')
        target = os.path.abspath(target)
        nose.tools.ok_(
            self.test_config._validate_global_include(target)
        )

    @nose.tools.raises(
        blackbird.utils.error.BlackbirdError
    )
    def test_non_exists_abs_path(self):
        # A glob whose directory does not exist must raise BlackbirdError.
        target = os.path.join(__file__, '../../etc/hogehoge/*')
        target = os.path.abspath(target)
        self.test_config._validate_global_include(target)

    # `mkdir` with mode 0o000 is the simplest way to get an unreadable dir.
    @nose.tools.raises(
        blackbird.utils.error.BlackbirdError
    )
    def test_cannot_read_abs_path(self):
        # A directory with no permission bits must fail validation.
        unreadable = os.path.join(__file__, '../../tmp')
        unreadable = os.path.abspath(unreadable) + '/'
        os.mkdir(unreadable, 0o000)
        self.test_config._validate_global_include(unreadable)
class TestConfigReaderMergeIncludes(object):
    """Tests for ConfigReader._merge_includes()."""

    def __init__(self):
        # Directory the 'include' directive points at; populated per test.
        self.include_dir = os.path.join(
            os.path.dirname(__file__), '../etc/blackbird/conf.d/'
        )
        self.test_config = blackbird.utils.configread.ConfigReader(
            infile=(
                '[global]',
                'user = nobody',
                'group = nobody',
                'include = {0}'.format(self.include_dir)
            )
        )

    def teardown(self):
        # Remove every config file dropped into the include directory.
        for leftover in glob.glob(self.include_dir + '*'):
            os.remove(leftover)

    def test_merge_one_config(self):
        # A single dropped-in config section must appear after merging.
        lines = (
            '[test_statistics]\n',
            'module = statistics'
        )
        with open(
            os.path.join(self.include_dir, 'test_stats.cfg'), 'w'
        ) as fobj:
            fobj.writelines(lines)
        self.test_config._merge_includes()
        nose.tools.ok_(
            'test_statistics' in self.test_config.config.keys()
        )
| 26.363636 | 67 | 0.556897 |
import os
import glob
import shutil
import nose.tools
import blackbird.utils.configread
import blackbird.utils.error
class TestConfigReaderGetGlobalIncludeAbsPath(object):
    """Tests for ConfigReader._get_global_include_abs_path()."""

    def __init__(self):
        # Minimal [global] section; ConfigReader requires user/group keys.
        self.test_config = blackbird.utils.configread.ConfigReader(
            infile=(
                '[global]',
                'user = nobody',
                'group = nobody'
            )
        )

    def test_abs_path(self):
        # An absolute glob pattern must pass through unchanged.
        pattern = '/etc/blackbird/conf.d/*.cfg'
        result = self.test_config._get_global_include_abs_path(pattern)
        nose.tools.eq_(pattern, result)

    def test_relative_path(self):
        # A relative glob pattern must be resolved to an absolute one.
        result = self.test_config._get_global_include_abs_path(
            './blackbird/*.cfg'
        )
        nose.tools.ok_(os.path.isabs(result))

    def test_abs_dir(self):
        # An absolute directory must come back absolute with '*' appended.
        directory = os.path.join(__file__, '../../etc/')
        directory = os.path.abspath(directory)
        result = self.test_config._get_global_include_abs_path(directory)
        nose.tools.ok_(
            os.path.isabs(result) and result.endswith('*')
        )

    def test_relative_dir(self):
        # A relative directory must come back absolute with '*' appended.
        result = self.test_config._get_global_include_abs_path('./')
        nose.tools.ok_(
            os.path.isabs(result) and result.endswith('*')
        )
class TestConfigReaderValidateGlobalInclude(object):
    """Tests for ConfigReader._validate_global_include()."""

    def __init__(self):
        self.test_config = blackbird.utils.configread.ConfigReader(
            infile=(
                '[global]',
                'user = nobody',
                'group = nobody'
            )
        )
        # Scratch directory some tests create; removed again in teardown().
        self.tmp_dir = os.path.abspath(
            os.path.join(__file__, '../../tmp')
        )

    def teardown(self):
        # Best-effort cleanup of the scratch directory.
        if os.path.exists(self.tmp_dir):
            shutil.rmtree(self.tmp_dir, ignore_errors=True)

    def test_abs_path(self):
        # A readable, existing absolute glob must validate successfully.
        target = os.path.join(__file__, '../../etc/*')
        target = os.path.abspath(target)
        nose.tools.ok_(
            self.test_config._validate_global_include(target)
        )

    @nose.tools.raises(
        blackbird.utils.error.BlackbirdError
    )
    def test_non_exists_abs_path(self):
        # A glob whose directory does not exist must raise BlackbirdError.
        target = os.path.join(__file__, '../../etc/hogehoge/*')
        target = os.path.abspath(target)
        self.test_config._validate_global_include(target)

    @nose.tools.raises(
        blackbird.utils.error.BlackbirdError
    )
    def test_cannot_read_abs_path(self):
        # A directory with no permission bits must fail validation.
        unreadable = os.path.join(__file__, '../../tmp')
        unreadable = os.path.abspath(unreadable) + '/'
        os.mkdir(unreadable, 0o000)
        self.test_config._validate_global_include(unreadable)
class TestConfigReaderMergeIncludes(object):
    """Tests for ConfigReader._merge_includes()."""

    def __init__(self):
        # Directory the 'include' directive points at; populated per test.
        self.include_dir = os.path.join(
            os.path.dirname(__file__), '../etc/blackbird/conf.d/'
        )
        self.test_config = blackbird.utils.configread.ConfigReader(
            infile=(
                '[global]',
                'user = nobody',
                'group = nobody',
                'include = {0}'.format(self.include_dir)
            )
        )

    def teardown(self):
        # Remove every config file dropped into the include directory.
        for leftover in glob.glob(self.include_dir + '*'):
            os.remove(leftover)

    def test_merge_one_config(self):
        # A single dropped-in config section must appear after merging.
        lines = (
            '[test_statistics]\n',
            'module = statistics'
        )
        with open(
            os.path.join(self.include_dir, 'test_stats.cfg'), 'w'
        ) as fobj:
            fobj.writelines(lines)
        self.test_config._merge_includes()
        nose.tools.ok_(
            'test_statistics' in self.test_config.config.keys()
        )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.