hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72f90fd5e586ab49bbf8330d152dd3b15c6712f | 2,341 | py | Python | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T00:55:25.000Z | 2020-12-15T00:55:25.000Z | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | app/lib/models.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | """deserialize auto-icd models and provide a consistent interface"""
import typing as t
import json
import pickle
from pathlib import Path
import numpy as np
import onnxruntime as rt
# Model artifacts are resolved relative to the current working directory;
# presumably the app is launched from the repo root -- TODO confirm.
APP_ROOT = Path("./app")
ASSETS_DIR = APP_ROOT/"assets"
class AutoICDModel:
    """Base wrapper around an ONNX runtime inference session.

    Subclasses implement ``__call__`` to map free clinical text to a set of
    predicted ICD-9 codes using ``self.sess``.
    """
    def __init__(self, onnx_model_fp):
        """Open an ONNX inference session for the model file at *onnx_model_fp*.

        :param onnx_model_fp: pathlib.Path to an existing ``.onnx`` model file.
        """
        assert onnx_model_fp.exists()
        self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))
    def __call__(self, free_text: str) -> t.Set[str]:
        # Bug fix: this was misspelled ``___call__`` (three underscores), which
        # left instances uncallable (TypeError) instead of raising this
        # informative error for subclasses that forgot to override it.
        raise NotImplementedError("Subclasses just provide model interaction logic!")
# class KissModel(AutoICDModel):
# def __init__(self, onnx_model_fp, icd9_codes: t.List[str]):
# """because we are only loading a few codes,
#         we need to know which ones in order to
#         decode the model output, which is a 1x|icd9_codes| matrix"""
# super().__init__(onnx_model_fp)
# self.icd9_codes = icd9_codes
# def ___call__(self, free_text: str) -> t.Set[str]:
# X = np.array([[free_text]])
# predictions, predictions_proba \
# = sess.run(None, {"free_text_input": X})[0]
# codes_predicted = [
# code for prediction, code in zip(predictions, self.icd9_codes)
# if prediction == 1 # i.e., if the code is predicted to be present
# ]
# codes2predicted_proba = {
# code: proba for code, proba in zip(self.icd9_codes, predictions_proba)
# }
# return codes_predicted, codes2predicted_proba
# def get_kiss_model():
# onnx_model_fp = ASSETS_DIR/"kiss_model.onnx"
# with open(ASSETS_DIR/"kiss_model.onnx.metadata.json") as f:
# icd9_codes = json.load(f)["icd9_codes_relevant"]
# model = KissModel(onnx_model_fp, icd9_codes)
# return model
class KissModel:
    """Kiss Model using pickle for persistence.

    Loads a pickled multi-label classifier together with the list of ICD-9
    codes it predicts, and maps a free-text document to a per-code
    probability vector.
    """
    def __init__(self):
        # The metadata JSON names the ICD-9 codes, in the order the pickled
        # classifier emits its per-code probabilities.
        meta_path = ASSETS_DIR / "kiss_model.pkl.metadata.json"
        with meta_path.open() as meta_file:
            self.icd9_codes = json.load(meta_file)["icd9_codes_relevant"]
        with (ASSETS_DIR / "kiss_model.pkl").open("rb") as model_file:
            self.model = pickle.load(model_file)
    def __call__(self, free_text: str):
        # predict_proba yields, per ICD-9 code, a (1, 2) array of
        # [P(absent), P(present)] for the single input document; keep only
        # P(present), giving one probability per code.
        sample = np.array([free_text])
        per_code_proba = self.model.predict_proba(sample)
        return np.array([proba.tolist() for proba in per_code_proba])[:, 0, 1]
import typing as t
import json
import pickle
from pathlib import Path
import numpy as np
import onnxruntime as rt
APP_ROOT = Path("./app")
ASSETS_DIR = APP_ROOT/"assets"
class AutoICDModel:
def __init__(self, onnx_model_fp):
assert onnx_model_fp.exists()
self.sess = rt.InferenceSession(str(onnx_model_fp.resolve()))
def ___call__(self, free_text: str) -> t.Set[str]:
raise NotImplementedError("Subclasses just provide model interaction logic!")
# we need to know which ones in otder to decode
# decode the model output, which is a 1x|icd9_codes| matrix"""
it__(self):
with open(ASSETS_DIR/"kiss_model.pkl.metadata.json") as f_meta:
self.icd9_codes = json.load(f_meta)["icd9_codes_relevant"]
with open(ASSETS_DIR/"kiss_model.pkl", "rb") as f:
self.model = pickle.loads(f.read())
def __call__(self, free_text: str):
X = np.array([free_text])
predicted_codes_proba = self.model.predict_proba(X)
return np.array([proba.tolist() for proba in predicted_codes_proba])[:,0,1] | true | true |
f72f9151e8655b5897d9a3dbddc1abcb2e46e0c6 | 1,689 | py | Python | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | 2 | 2017-07-04T11:51:30.000Z | 2019-10-07T08:04:12.000Z | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | null | null | null | app.py | Catlinman/catlinman.com | 9a94b8491975cf589b9cd53262a54fa56b7a0555 | [
"MIT"
] | 2 | 2020-09-29T06:56:07.000Z | 2020-10-02T21:49:31.000Z |
# Import Python modules.
import sys
# Import application modules.
import assets
# NOTE(review): ``database`` is never referenced below; presumably imported
# for import-time side effects (model/connection registration) -- confirm.
import database
import blueprints
# Import basic Sanic modules.
from sanic import Sanic
# Get the required Jinja2 module for rendering templates.
import jinja2 as j2
# Enabling async template execution which allows you to take advantage of newer
# Python features requires Python 3.6 or later.
enable_async = sys.version_info >= (3, 6)
# Create a new Sanic application.
app = Sanic(name="catlinman.com", register=False)
# Setup the static directory.
app.static("/static", "./static")
# Load the template environment with async support; templates live in the
# "templates" directory of the "app" package.
template_env = j2.Environment(
    loader=j2.PackageLoader("app", "templates"),
    autoescape=j2.select_autoescape(["html", "xml"]),
    enable_async=enable_async,
    trim_blocks=True,
    lstrip_blocks=True
)
# Expose the Jinja2 environment to request handlers via the app config.
app.config.template_env = template_env
# Add middleware blueprints to this project.
app.blueprint(blueprints.middleware)
# Add all blueprints to this project.
app.blueprint(blueprints.root)
app.blueprint(blueprints.user)
app.blueprint(blueprints.about)
app.blueprint(blueprints.blog)
app.blueprint(blueprints.contact)
app.blueprint(blueprints.error)
app.blueprint(blueprints.gallery)
app.blueprint(blueprints.project)
# Load data blueprints into the data route.
app.blueprint(blueprints.location, url_prefix='/data')
app.blueprint(blueprints.psa, url_prefix='/data')
app.blueprint(blueprints.template, url_prefix='/data')
if __name__ == "__main__":
    # Build all our assets.
    assets.build_assets()
    # Run the main application (single worker, local-only bind).
    app.run(
        host="127.0.0.1",
        port=24070,
        workers=1,
        debug=False
    )
| 25.590909 | 79 | 0.748372 |
import sys
import assets
import database
import blueprints
from sanic import Sanic
import jinja2 as j2
enable_async = sys.version_info >= (3, 6)
app = Sanic(name="catlinman.com", register=False)
app.static("/static", "./static")
template_env = j2.Environment(
loader=j2.PackageLoader("app", "templates"),
autoescape=j2.select_autoescape(["html", "xml"]),
enable_async=enable_async,
trim_blocks=True,
lstrip_blocks=True
)
app.config.template_env = template_env
app.blueprint(blueprints.middleware)
app.blueprint(blueprints.root)
app.blueprint(blueprints.user)
app.blueprint(blueprints.about)
app.blueprint(blueprints.blog)
app.blueprint(blueprints.contact)
app.blueprint(blueprints.error)
app.blueprint(blueprints.gallery)
app.blueprint(blueprints.project)
app.blueprint(blueprints.location, url_prefix='/data')
app.blueprint(blueprints.psa, url_prefix='/data')
app.blueprint(blueprints.template, url_prefix='/data')
if __name__ == "__main__":
assets.build_assets()
app.run(
host="127.0.0.1",
port=24070,
workers=1,
debug=False
)
| true | true |
f72f91b9b9f83a577df103957548819b165bf8d5 | 16,826 | py | Python | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | null | null | null | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | 80 | 2018-07-17T20:10:20.000Z | 2021-08-17T15:38:24.000Z | pychron/hardware/core/communicators/serial_communicator.py | ael-noblegas/pychron | 6ebbbb1f66a614972b62b7a9be4c784ae61b5d62 | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =============enthought library imports=======================
# =============standard library imports ========================
import codecs
import glob
import os
import sys
import time
import serial
# =============local library imports ==========================
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
    """Return candidate serial-port device names for the current platform.

    Windows yields COM1..COM256; POSIX platforms glob /dev for Keyspan,
    USB, furpi, pychron and SiLabs adapters, in that priority order.
    """
    if sys.platform == 'win32':
        return ['COM{}'.format(n) for n in range(1, 257)]
    usb, furpi, pychron, slab = (
        glob.glob(pattern)
        for pattern in ('/dev/tty.usb*', '/dev/furpi.*', '/dev/pychron.*', '/dev/tty.SLAB*')
    )
    # Keyspan adapters enumerate differently on macOS vs. other POSIX systems.
    keyspan_pattern = '/dev/tty.U*' if sys.platform == 'darwin' else '/dev/ttyU*'
    keyspan = glob.glob(keyspan_pattern)
    return keyspan + usb + furpi + pychron + slab
class SerialCommunicator(Communicator):
    """
    Base Class for devices that communicate using a rs232 serial port.
    Using Keyspan serial converter is the best option for a Mac
    class is built on top of pyserial. Pyserial is used to create a handle and
    this class uses the handle to read and write.
    handles are created when a serial device is opened
    setup args are loaded using load(). this method should be overwritten to
    load specific items.
    """
    # char_write = False
    # When True, open() may scan every available port for a device that
    # answers ``id_query`` with ``id_response`` (see _find_handle).
    _auto_find_handle = False
    _auto_write_handle = False
    # pyserial connection parameters, normally populated by load().
    baudrate = None
    port = None
    bytesize = None
    parity = None
    stopbits = None
    timeout = None
    # Query/expected-response pair used to identify a device when scanning.
    id_query = ''
    id_response = ''
    # Read behaviour: delay (ms) before the first read, the byte(s) that
    # terminate a response, and an optional fixed index where the
    # terminator is expected.
    read_delay = None
    read_terminator = None
    read_terminator_position = None
    clear_output = False
    _config = None
    # Attributes reported by the base-class communications report.
    _comms_report_attrs = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')
    @property
    def address(self):
        # The "address" of a serial device is simply its port name.
        return self.port
    def test_connection(self):
        """Return True if a pyserial handle was successfully created."""
        return self.handle is not None
    def reset(self):
        """Cycle the connection: close, reopen at baudrate 0, then restore.

        NOTE(review): relies on the pyserial 2.x ``getBaudrate``/
        ``setBaudrate`` API, which was removed in pyserial 3.x -- confirm
        the pinned pyserial version.
        """
        handle = self.handle
        try:
            isopen = handle.isOpen()
            orate = handle.getBaudrate()
            if isopen:
                handle.close()
            # briefly reopen at baudrate 0 to force the driver to reset
            handle.setBaudrate(0)
            handle.open()
            time.sleep(0.1)
            handle.close()
            # restore the original baudrate and open/closed state
            handle.setBaudrate(orate)
            if isopen:
                handle.open()
        except Exception:
            self.warning('failed to reset connection')
    def close(self):
        """Close the underlying pyserial handle, if one exists."""
        if self.handle:
            self.debug('closing handle {}'.format(self.handle))
            self.handle.close()
    def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
        """Set communication parameters directly instead of from a config file."""
        self.baudrate = baudrate
        self.port = port
        self.set_parity(parity)
        self.set_stopbits(stopbits)
        self.bytesize = bytesize
    def load(self, config, path):
        """Read connection/read/write parameters from the [Communications]
        section of *config* (a ConfigParser-like object loaded from *path*)."""
        self.config_path = path
        self._config = config
        self.set_attribute(config, 'port', 'Communications', 'port')
        self.set_attribute(config, 'baudrate', 'Communications', 'baudrate',
                           cast='int', optional=True)
        self.set_attribute(config, 'bytesize', 'Communications', 'bytesize',
                           cast='int', optional=True)
        self.set_attribute(config, 'timeout', 'Communications', 'timeout',
                           cast='float', optional=True)
        self.set_attribute(config, 'clear_output', 'Communications', 'clear_output',
                           cast='boolean', optional=True)
        parity = self.config_get(config, 'Communications', 'parity', optional=True)
        self.set_parity(parity)
        stopbits = self.config_get(config, 'Communications', 'stopbits', optional=True)
        self.set_stopbits(stopbits)
        self.set_attribute(config, 'read_delay', 'Communications', 'read_delay',
                           cast='float', optional=True, default=25)
        self.set_attribute(config, 'read_terminator', 'Communications', 'terminator',
                           optional=True, default=None)
        self.set_attribute(config, 'read_terminator_position', 'Communications', 'terminator_position',
                           optional=True, default=None, cast='int')
        self.set_attribute(config, 'write_terminator', 'Communications', 'write_terminator',
                           optional=True, default=b'\r')
        # Translate symbolic terminator names from the config file into the
        # actual byte sequences used on the wire.
        if self.write_terminator == 'CRLF':
            self.write_terminator = b'\r\n'
        if self.read_terminator == 'CRLF':
            self.read_terminator = b'\r\n'
        if self.read_terminator == 'ETX':
            self.read_terminator = chr(3)
    def set_parity(self, parity):
        """Map a parity name (e.g. 'even', 'odd', 'none') to the pyserial constant."""
        if parity:
            self.parity = getattr(serial, 'PARITY_%s' % parity.upper())
    def set_stopbits(self, stopbits):
        """Map a stopbits value ('1'/'2'/1/2 or a name) to the pyserial constant."""
        if stopbits:
            if stopbits in ('1', 1):
                stopbits = 'ONE'
            elif stopbits in ('2', 2):
                stopbits = 'TWO'
            self.stopbits = getattr(serial, 'STOPBITS_{}'.format(stopbits.upper()))
    def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
        """Write *cmd* to the device without reading a response."""
        if self.handle is None:
            if verbose:
                info = 'no handle'
                self.log_tell(cmd, info)
            return
        with self._lock:
            self._write(cmd, is_hex=is_hex)
            if verbose:
                self.log_tell(cmd, info)
    def read(self, nchars=None, *args, **kw):
        """Read from the device: exactly *nchars* characters if given,
        otherwise until a terminator is seen (see _read_terminator)."""
        with self._lock:
            if nchars is not None:
                r = self._read_nchars(nchars)
            else:
                r = self._read_terminator(*args, **kw)
        return r
    def ask(self, cmd, is_hex=False, verbose=True, delay=None,
            replace=None, remove_eol=True, info=None, nbytes=None,
            handshake_only=False,
            handshake=None,
            read_terminator=None,
            terminator_position=None,
            nchars=None):
        """Write *cmd* and return the device's response.

        The read strategy is chosen from the keyword arguments, in order:
        hex (*is_hex*/*nbytes*), handshake, fixed-length (*nchars*), or
        terminator-delimited (the default). Returns None if no handle
        exists, the port is closed, or the write failed.
        """
        if self.handle is None:
            if verbose:
                x = prep_str(cmd.strip())
                self.info('no handle {}'.format(x))
            return
        if not self.handle.isOpen():
            return
        with self._lock:
            if self.clear_output:
                # discard any stale bytes in both directions before talking
                self.handle.flushInput()
                self.handle.flushOutput()
            cmd = self._write(cmd, is_hex=is_hex)
            if cmd is None:
                return
            if is_hex:
                if nbytes is None:
                    nbytes = 8
                re = self._read_hex(nbytes=nbytes, delay=delay)
            elif handshake is not None:
                re = self._read_handshake(handshake, handshake_only, delay=delay)
            elif nchars is not None:
                re = self._read_nchars(nchars)
            else:
                re = self._read_terminator(delay=delay,
                                           terminator=read_terminator,
                                           terminator_position=terminator_position)
            if remove_eol and not is_hex:
                re = remove_eol_func(re)
            if verbose:
                pre = process_response(re, replace, remove_eol=not is_hex)
                self.log_response(cmd, pre, info)
        return re
    def open(self, **kw):
        """
            Use pyserial to create a handle connected to port with baudrate
            default handle parameters
            baudrate=9600
            bytesize=EIGHTBITS
            parity= PARITY_NONE
            stopbits= STOPBITS_ONE
            timeout=None
        """
        port = kw.get('port')
        if port is None:
            port = self.port
            if port is None:
                self.warning('Port not set')
                return False
        # #on windows device handles probably handled differently
        if sys.platform == 'darwin':
            port = '/dev/tty.{}'.format(port)
        kw['port'] = port
        # fill in any connection parameters not supplied by the caller
        # from the corresponding instance attributes
        for key in ['baudrate', 'bytesize', 'parity', 'stopbits', 'timeout']:
            v = kw.get(key)
            if v is None:
                v = getattr(self, key)
            if v is not None:
                kw[key] = v
        pref = kw.pop('prefs', None)
        if pref is not None:
            pref = pref.serial_preference
            self._auto_find_handle = pref.auto_find_handle
            self._auto_write_handle = pref.auto_write_handle
        # assume simulation until a real handle is created
        self.simulation = True
        if self._validate_address(port):
            try_connect = True
            while try_connect:
                try:
                    self.debug('Connection parameters={}'.format(kw))
                    self.handle = serial.Serial(**kw)
                    try_connect = False
                    self.simulation = False
                except serial.serialutil.SerialException:
                    try_connect = False
                    self.debug_exception()
        elif self._auto_find_handle:
            # address invalid: optionally scan all ports for the device
            self._find_handle(**kw)
        self.debug('Serial device: {}'.format(self.handle))
        return self.handle is not None  # connected is true if handle is not None
    # private
    def _get_report_value(self, key):
        # Prefer the live value from the open handle over the configured one.
        c, value = super(SerialCommunicator, self)._get_report_value(key)
        if self.handle:
            value = getattr(self.handle, key)
        return c, value
    def _find_handle(self, **kw):
        """Scan every candidate port, sending ``id_query`` until a device
        answers with ``id_response`` (string match or callable check)."""
        found = False
        self.simulation = False
        self.info('Trying to find correct port')
        port = None
        for port in get_ports():
            self.info('trying port {}'.format(port))
            kw['port'] = port
            try:
                self.handle = serial.Serial(**kw)
            except serial.SerialException:
                continue
            r = self.ask(self.id_query)
            # use id_response as a callable to do device specific
            # checking
            if callable(self.id_response):
                if self.id_response(r):
                    found = True
                    self.simulation = False
                    break
            if r == self.id_response:
                found = True
                self.simulation = False
                break
        if not found:
            # update the port
            if self._auto_write_handle and port:
                # port in form
                # /dev/tty.USAXXX1.1
                p = os.path.split(port)[-1]
                # remove tty.
                p = p[4:]
                # TODO(review): no value is passed to set(), so the stripped
                # port name ``p`` is never written back; section name
                # 'Communication' also differs from 'Communications' used in
                # load() -- confirm intended behaviour.
                self._config.set('Communication', 'port', )
                self.write_configuration(self._config, self.config_path)
            self.handle = None
            self.simulation = True
    def _validate_address(self, port):
        """
            use glob to check the available serial ports
            valid ports start with /dev/tty.U or /dev/tty.usbmodem
        """
        valid = get_ports()
        if port in valid:
            return True
        else:
            msg = '{} is not a valid port address'.format(port)
            self.warning(msg)
            if not valid:
                self.warning('No valid ports')
            else:
                self.warning('======== Valid Ports ========')
                for v in valid:
                    self.warning(v)
                self.warning('=============================')
    def _write(self, cmd, is_hex=False):
        """
            use the serial handle to write the cmd to the serial buffer

            returns the bytes actually written (terminator appended unless
            *is_hex*), or None if the write raised an exception
        """
        if not self.simulation:
            if not isinstance(cmd, bytes):
                cmd = bytes(cmd, 'utf-8')
            if is_hex:
                # interpret cmd as a hex string, e.g. b'0A0B' -> b'\n\x0b'
                cmd = codecs.decode(cmd, 'hex')
            else:
                wt = self.write_terminator
                if wt is not None:
                    if not isinstance(wt, bytes):
                        wt = bytes(wt, 'utf-8')
                    cmd += wt
            try:
                self.handle.write(cmd)
            except (serial.serialutil.SerialException, OSError, IOError, ValueError) as e:
                self.warning('Serial Communicator write execption: {}'.format(e))
                return
        return cmd
    def _read_nchars(self, nchars, timeout=1, delay=None):
        # read exactly nchars characters (or until timeout)
        return self._read_loop(lambda r: self._get_nchars(nchars, r), delay, timeout)
    def _read_hex(self, nbytes=8, timeout=1, delay=None):
        # read nbytes worth of hex characters (or until timeout)
        return self._read_loop(lambda r: self._get_nbytes(nbytes, r), delay, timeout)
    def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
        """Read until the ACK byte of *handshake* (an (ack, nak) pair) is seen;
        if *handshake_only*, the ACK itself is returned as the response."""
        def hfunc(r):
            terminated = False
            ack, r = self._check_handshake(handshake)
            if handshake_only and ack:
                r = handshake[0]
                terminated = True
            elif ack and r is not None:
                terminated = True
            return r, terminated
        return self._read_loop(hfunc, delay, timeout)
    def _read_terminator(self, timeout=1, delay=None,
                         terminator=None, terminator_position=None):
        """Accumulate bytes until one of the terminator sequences is seen
        (default: CR/LF variants) or the timeout expires."""
        if terminator is None:
            terminator = self.read_terminator
        if terminator_position is None:
            terminator_position = self.read_terminator_position
        if terminator is None:
            terminator = (b'\r\x00', b'\r\n', b'\r', b'\n')
        if not isinstance(terminator, (list, tuple)):
            terminator = (terminator,)
        def func(r):
            terminated = False
            try:
                inw = self.handle.inWaiting()
                r += self.handle.read(inw)
                if r and r.strip():
                    for ti in terminator:
                        if terminator_position:
                            # NOTE(review): on Python 3 ``r[terminator_position]``
                            # is an int while ``ti`` is bytes, so this comparison
                            # may never be True -- confirm intended behaviour.
                            terminated = r[terminator_position] == ti
                        else:
                            if isinstance(ti, str):
                                ti = ti.encode()
                            terminated = r.endswith(ti)
                        if terminated:
                            break
            except BaseException as e:
                self.warning(e)
            return r, terminated
        return self._read_loop(func, delay, timeout)
    def _get_nbytes(self, *args, **kw):
        """
            1 byte == 2 chars
        """
        return self._get_nchars(*args, **kw)
    def _get_nchars(self, nchars, r):
        # Append whatever is waiting (capped at the remaining count) and
        # report whether the requested length has been reached.
        handle = self.handle
        inw = handle.inWaiting()
        c = min(inw, nchars - len(r))
        r += handle.read(c)
        return r[:nchars], len(r) >= nchars
    def _check_handshake(self, handshake_chrs):
        # Returns (ack_seen, remainder_bytes); (False, None) if nothing read.
        ack, nak = handshake_chrs
        inw = self.handle.inWaiting()
        r = self.handle.read(inw)
        if r:
            return ack == r[0], r[1:]
        return False, None
    def _read_loop(self, func, delay, timeout=1):
        """Poll *func* (accumulator -> (accumulator, terminated)) every 10 ms
        until it reports termination or *timeout* seconds elapse."""
        # optional settle delay before the first read, in milliseconds
        if delay is not None:
            time.sleep(delay / 1000.)
        elif self.read_delay:
            time.sleep(self.read_delay / 1000.)
        r = b''
        st = time.time()
        handle = self.handle
        ct = time.time()
        while ct - st < timeout:
            if not handle.isOpen():
                break
            try:
                r, isterminated = func(r)
                if isterminated:
                    break
            except (ValueError, TypeError):
                pass
            time.sleep(0.01)
            ct = time.time()
        if ct - st > timeout:
            l = len(r) if r else 0
            self.info('timed out. {}s r={}, len={}'.format(timeout, r, l))
        return r
if __name__ == '__main__':
    # Manual smoke test: poll a device on a hard-coded usbmodem port.
    s = SerialCommunicator()
    s.read_delay = 0
    s.port = 'usbmodemfd1221'
    s.open()
    time.sleep(2)
    s.tell('A', verbose=False)
    for i in range(10):
        print('dddd', s.ask('1', verbose=False))
        time.sleep(1)
    # s.tell('ddd', verbose=False)
    # print s.ask('ddd', verbose=False)
# ===================== EOF ==========================================
| 31.927894 | 103 | 0.524664 |
import codecs
import glob
import os
import sys
import time
import serial
from .communicator import Communicator, process_response, prep_str, remove_eol_func
def get_ports():
if sys.platform == 'win32':
ports = ['COM{}'.format(i+1) for i in range(256)]
else:
usb = glob.glob('/dev/tty.usb*')
furpi = glob.glob('/dev/furpi.*')
pychron = glob.glob('/dev/pychron.*')
slab = glob.glob('/dev/tty.SLAB*')
if sys.platform == 'darwin':
keyspan = glob.glob('/dev/tty.U*')
else:
keyspan = glob.glob('/dev/ttyU*')
ports = keyspan + usb + furpi + pychron + slab
return ports
class SerialCommunicator(Communicator):
_auto_find_handle = False
_auto_write_handle = False
baudrate = None
port = None
bytesize = None
parity = None
stopbits = None
timeout = None
id_query = ''
id_response = ''
read_delay = None
read_terminator = None
read_terminator_position = None
clear_output = False
_config = None
_comms_report_attrs = ('port', 'baudrate', 'bytesize', 'parity', 'stopbits', 'timeout')
@property
def address(self):
return self.port
def test_connection(self):
return self.handle is not None
def reset(self):
handle = self.handle
try:
isopen = handle.isOpen()
orate = handle.getBaudrate()
if isopen:
handle.close()
handle.setBaudrate(0)
handle.open()
time.sleep(0.1)
handle.close()
handle.setBaudrate(orate)
if isopen:
handle.open()
except Exception:
self.warning('failed to reset connection')
def close(self):
if self.handle:
self.debug('closing handle {}'.format(self.handle))
self.handle.close()
def load_comdict(self, port, baudrate=9600, bytesize=8, parity=None, stopbits=1):
self.baudrate = baudrate
self.port = port
self.set_parity(parity)
self.set_stopbits(stopbits)
self.bytesize = bytesize
def load(self, config, path):
self.config_path = path
self._config = config
self.set_attribute(config, 'port', 'Communications', 'port')
self.set_attribute(config, 'baudrate', 'Communications', 'baudrate',
cast='int', optional=True)
self.set_attribute(config, 'bytesize', 'Communications', 'bytesize',
cast='int', optional=True)
self.set_attribute(config, 'timeout', 'Communications', 'timeout',
cast='float', optional=True)
self.set_attribute(config, 'clear_output', 'Communications', 'clear_output',
cast='boolean', optional=True)
parity = self.config_get(config, 'Communications', 'parity', optional=True)
self.set_parity(parity)
stopbits = self.config_get(config, 'Communications', 'stopbits', optional=True)
self.set_stopbits(stopbits)
self.set_attribute(config, 'read_delay', 'Communications', 'read_delay',
cast='float', optional=True, default=25)
self.set_attribute(config, 'read_terminator', 'Communications', 'terminator',
optional=True, default=None)
self.set_attribute(config, 'read_terminator_position', 'Communications', 'terminator_position',
optional=True, default=None, cast='int')
self.set_attribute(config, 'write_terminator', 'Communications', 'write_terminator',
optional=True, default=b'\r')
if self.write_terminator == 'CRLF':
self.write_terminator = b'\r\n'
if self.read_terminator == 'CRLF':
self.read_terminator = b'\r\n'
if self.read_terminator == 'ETX':
self.read_terminator = chr(3)
def set_parity(self, parity):
if parity:
self.parity = getattr(serial, 'PARITY_%s' % parity.upper())
def set_stopbits(self, stopbits):
if stopbits:
if stopbits in ('1', 1):
stopbits = 'ONE'
elif stopbits in ('2', 2):
stopbits = 'TWO'
self.stopbits = getattr(serial, 'STOPBITS_{}'.format(stopbits.upper()))
def tell(self, cmd, is_hex=False, info=None, verbose=True, **kw):
if self.handle is None:
if verbose:
info = 'no handle'
self.log_tell(cmd, info)
return
with self._lock:
self._write(cmd, is_hex=is_hex)
if verbose:
self.log_tell(cmd, info)
def read(self, nchars=None, *args, **kw):
with self._lock:
if nchars is not None:
r = self._read_nchars(nchars)
else:
r = self._read_terminator(*args, **kw)
return r
def ask(self, cmd, is_hex=False, verbose=True, delay=None,
replace=None, remove_eol=True, info=None, nbytes=None,
handshake_only=False,
handshake=None,
read_terminator=None,
terminator_position=None,
nchars=None):
if self.handle is None:
if verbose:
x = prep_str(cmd.strip())
self.info('no handle {}'.format(x))
return
if not self.handle.isOpen():
return
with self._lock:
if self.clear_output:
self.handle.flushInput()
self.handle.flushOutput()
cmd = self._write(cmd, is_hex=is_hex)
if cmd is None:
return
if is_hex:
if nbytes is None:
nbytes = 8
re = self._read_hex(nbytes=nbytes, delay=delay)
elif handshake is not None:
re = self._read_handshake(handshake, handshake_only, delay=delay)
elif nchars is not None:
re = self._read_nchars(nchars)
else:
re = self._read_terminator(delay=delay,
terminator=read_terminator,
terminator_position=terminator_position)
if remove_eol and not is_hex:
re = remove_eol_func(re)
if verbose:
pre = process_response(re, replace, remove_eol=not is_hex)
self.log_response(cmd, pre, info)
return re
def open(self, **kw):
port = kw.get('port')
if port is None:
port = self.port
if port is None:
self.warning('Port not set')
return False
= '/dev/tty.{}'.format(port)
kw['port'] = port
for key in ['baudrate', 'bytesize', 'parity', 'stopbits', 'timeout']:
v = kw.get(key)
if v is None:
v = getattr(self, key)
if v is not None:
kw[key] = v
pref = kw.pop('prefs', None)
if pref is not None:
pref = pref.serial_preference
self._auto_find_handle = pref.auto_find_handle
self._auto_write_handle = pref.auto_write_handle
self.simulation = True
if self._validate_address(port):
try_connect = True
while try_connect:
try:
self.debug('Connection parameters={}'.format(kw))
self.handle = serial.Serial(**kw)
try_connect = False
self.simulation = False
except serial.serialutil.SerialException:
try_connect = False
self.debug_exception()
elif self._auto_find_handle:
self._find_handle(**kw)
self.debug('Serial device: {}'.format(self.handle))
return self.handle is not None
def _get_report_value(self, key):
c, value = super(SerialCommunicator, self)._get_report_value(key)
if self.handle:
value = getattr(self.handle, key)
return c, value
def _find_handle(self, **kw):
found = False
self.simulation = False
self.info('Trying to find correct port')
port = None
for port in get_ports():
self.info('trying port {}'.format(port))
kw['port'] = port
try:
self.handle = serial.Serial(**kw)
except serial.SerialException:
continue
r = self.ask(self.id_query)
if callable(self.id_response):
if self.id_response(r):
found = True
self.simulation = False
break
if r == self.id_response:
found = True
self.simulation = False
break
if not found:
if self._auto_write_handle and port:
p = os.path.split(port)[-1]
p = p[4:]
self._config.set('Communication', 'port', )
self.write_configuration(self._config, self.config_path)
self.handle = None
self.simulation = True
def _validate_address(self, port):
valid = get_ports()
if port in valid:
return True
else:
msg = '{} is not a valid port address'.format(port)
self.warning(msg)
if not valid:
self.warning('No valid ports')
else:
self.warning('======== Valid Ports ========')
for v in valid:
self.warning(v)
self.warning('=============================')
def _write(self, cmd, is_hex=False):
if not self.simulation:
if not isinstance(cmd, bytes):
cmd = bytes(cmd, 'utf-8')
if is_hex:
cmd = codecs.decode(cmd, 'hex')
else:
wt = self.write_terminator
if wt is not None:
if not isinstance(wt, bytes):
wt = bytes(wt, 'utf-8')
cmd += wt
try:
self.handle.write(cmd)
except (serial.serialutil.SerialException, OSError, IOError, ValueError) as e:
self.warning('Serial Communicator write execption: {}'.format(e))
return
return cmd
def _read_nchars(self, nchars, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nchars(nchars, r), delay, timeout)
def _read_hex(self, nbytes=8, timeout=1, delay=None):
return self._read_loop(lambda r: self._get_nbytes(nbytes, r), delay, timeout)
def _read_handshake(self, handshake, handshake_only, timeout=1, delay=None):
def hfunc(r):
terminated = False
ack, r = self._check_handshake(handshake)
if handshake_only and ack:
r = handshake[0]
terminated = True
elif ack and r is not None:
terminated = True
return r, terminated
return self._read_loop(hfunc, delay, timeout)
def _read_terminator(self, timeout=1, delay=None,
terminator=None, terminator_position=None):
if terminator is None:
terminator = self.read_terminator
if terminator_position is None:
terminator_position = self.read_terminator_position
if terminator is None:
terminator = (b'\r\x00', b'\r\n', b'\r', b'\n')
if not isinstance(terminator, (list, tuple)):
terminator = (terminator,)
def func(r):
terminated = False
try:
inw = self.handle.inWaiting()
r += self.handle.read(inw)
if r and r.strip():
for ti in terminator:
if terminator_position:
terminated = r[terminator_position] == ti
else:
if isinstance(ti, str):
ti = ti.encode()
terminated = r.endswith(ti)
if terminated:
break
except BaseException as e:
self.warning(e)
return r, terminated
return self._read_loop(func, delay, timeout)
def _get_nbytes(self, *args, **kw):
return self._get_nchars(*args, **kw)
def _get_nchars(self, nchars, r):
handle = self.handle
inw = handle.inWaiting()
c = min(inw, nchars - len(r))
r += handle.read(c)
return r[:nchars], len(r) >= nchars
def _check_handshake(self, handshake_chrs):
ack, nak = handshake_chrs
inw = self.handle.inWaiting()
r = self.handle.read(inw)
if r:
return ack == r[0], r[1:]
return False, None
def _read_loop(self, func, delay, timeout=1):
if delay is not None:
time.sleep(delay / 1000.)
elif self.read_delay:
time.sleep(self.read_delay / 1000.)
r = b''
st = time.time()
handle = self.handle
ct = time.time()
while ct - st < timeout:
if not handle.isOpen():
break
try:
r, isterminated = func(r)
if isterminated:
break
except (ValueError, TypeError):
pass
time.sleep(0.01)
ct = time.time()
if ct - st > timeout:
l = len(r) if r else 0
self.info('timed out. {}s r={}, len={}'.format(timeout, r, l))
return r
if __name__ == '__main__':
s = SerialCommunicator()
s.read_delay = 0
s.port = 'usbmodemfd1221'
s.open()
time.sleep(2)
s.tell('A', verbose=False)
for i in range(10):
print('dddd', s.ask('1', verbose=False))
time.sleep(1)
| true | true |
f72f91d02075c3865e71042a4c4631a5ce5c09f9 | 6,749 | py | Python | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 345 | 2015-01-03T19:19:27.000Z | 2022-03-20T11:00:50.000Z | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 73 | 2015-01-06T14:54:02.000Z | 2022-03-11T23:11:34.000Z | lfs/manage/views/marketing/featured.py | michael-hahn/django-lfs | 26c3471a8f8d88269c84f714f507b952dfdb6397 | [
"BSD-3-Clause"
] | 148 | 2015-01-07T16:30:08.000Z | 2022-03-25T21:20:58.000Z | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.settings import VARIANT
from lfs.core.signals import featured_changed
from lfs.core.utils import LazyEncoder
from lfs.marketing.models import FeaturedProduct
@permission_required("manage_shop")
def manage_featured(request, template_name="manage/marketing/featured.html"):
    """Render the featured-products management page.

    Builds the inline product listing first (which also refreshes the
    session's ``featured-amount``), then the page-size choices, and
    returns the rendered template as a string (matching the original
    contract of this view helper).
    """
    inline = manage_featured_inline(request, as_string=True)
    # Read the amount only after the inline call, which may have
    # updated it in the session.
    current_amount = request.session.get("featured-amount")
    amount_options = [
        {"value": value, "selected": value == current_amount}
        for value in (10, 25, 50, 100)
    ]
    return render_to_string(template_name, request=request, context={
        "featured_inline": inline,
        "amount_options": amount_options,
    })
@permission_required("manage_shop")
def manage_featured_inline(request, as_string=False, template_name="manage/marketing/featured_inline.html"):
    """Render the filterable, paginated product list for the featured view.

    Persists the current text filter, category filter, page and page
    size in the session, excludes products that are already featured,
    and returns either the rendered HTML (``as_string=True``) or a JSON
    ``HttpResponse`` targeting the ``#featured-inline`` container.
    """
    featured = FeaturedProduct.objects.all()
    featured_ids = [f.product.id for f in featured]
    r = request.POST if request.method == 'POST' else request.GET
    s = request.session
    # If we get the parameter ``keep-filters`` or ``page`` we take the
    # filters out of the request resp. session. The request takes precedence.
    # The page parameter is given if the user clicks on the next/previous page
    # links. The ``keep-filters`` parameters is given is the users adds/removes
    # products. In this way we keeps the current filters when we needed to. If
    # the whole page is reloaded there is no ``keep-filters`` or ``page`` and
    # all filters are reset as they should.
    if r.get("keep-filters") or r.get("page"):
        page = r.get("page", s.get("featured_products_page", 1))
        filter_ = r.get("filter", s.get("filter"))
        category_filter = r.get("featured_category_filter", s.get("featured_category_filter"))
    else:
        page = r.get("page", 1)
        filter_ = r.get("filter")
        category_filter = r.get("featured_category_filter")
    # The current filters are saved in any case for later use.
    s["featured_products_page"] = page
    s["filter"] = filter_
    s["featured_category_filter"] = category_filter
    try:
        s["featured-amount"] = int(r.get("featured-amount", s.get("featured-amount")))
    except TypeError:
        # Neither the request nor the session carried an amount yet.
        s["featured-amount"] = 25
    filters = Q()
    if filter_:
        # Match by name or SKU; variants fall back to the parent's
        # SKU/name when they do not define their own.
        filters &= Q(name__icontains=filter_)
        filters |= Q(sku__icontains=filter_)
        filters |= (Q(sub_type=VARIANT) & Q(active_sku=False) & Q(parent__sku__icontains=filter_))
        filters |= (Q(sub_type=VARIANT) & Q(active_name=False) & Q(parent__name__icontains=filter_))
    if category_filter:
        if category_filter == "None":
            filters &= Q(categories=None)
        elif category_filter == "All":
            pass
        else:
            # First we collect all sub categories and using the `in` operator
            category = lfs_get_object_or_404(Category, pk=category_filter)
            categories = [category]
            categories.extend(category.get_all_children())
            filters &= Q(categories__in=categories)
    products = Product.objects.filter(filters).exclude(pk__in=featured_ids)
    paginator = Paginator(products, s["featured-amount"])
    total = products.count()
    try:
        page = paginator.page(page)
    except EmptyPage:
        # Out-of-range page number: the template treats 0 as "no page".
        page = 0
    result = render_to_string(template_name, request=request, context={
        "featured": featured,
        "total": total,
        "page": page,
        "paginator": paginator,
        "filter": filter_
    })
    if as_string:
        return result
    else:
        return HttpResponse(
            json.dumps({
                "html": [["#featured-inline", result]],
            }), content_type='application/json')
# Actions
@permission_required("manage_shop")
def add_featured(request):
    """Create a FeaturedProduct for every ``product-<id>`` key in the POST body.

    Returns a JSON payload with the re-rendered inline listing and a
    user-facing confirmation message.
    """
    for key in request.POST.keys():
        if not key.startswith("product"):
            continue
        product_id = key.split("-")[1]
        FeaturedProduct.objects.create(product_id=product_id)
        # Re-pack positions after each creation, as before.
        _update_positions()
    inline_html = manage_featured_inline(request, as_string=True)
    payload = json.dumps({
        "html": [["#featured-inline", inline_html]],
        "message": _(u"Featured product has been added.")
    }, cls=LazyEncoder)
    return HttpResponse(payload, content_type='application/json')
@permission_required("manage_shop")
def update_featured(request):
    """Saves or removes passed featured product passed id (within request body).

    Dispatches on the ``action`` POST parameter: ``"remove"`` deletes
    the posted featured products, anything else updates their
    positions.  Returns a JSON payload with the re-rendered inline HTML
    and a user message.
    """
    if request.POST.get("action") == "remove":
        # Track the last removed instance so the signal below has a
        # sender.  Previously this raised NameError when no
        # "product-*" key was posted (or every lookup failed), because
        # ``featured`` was only bound inside the loop.
        featured = None
        for temp_id in request.POST.keys():
            if not temp_id.startswith("product"):
                continue
            temp_id = temp_id.split("-")[1]
            try:
                featured = FeaturedProduct.objects.get(pk=temp_id)
                featured.delete()
            except (FeaturedProduct.DoesNotExist, ValueError):
                pass
            else:
                # Re-pack positions only after a successful delete.
                _update_positions()
        if featured is not None:
            featured_changed.send(featured)
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been removed.")
        }, cls=LazyEncoder)
    else:
        for temp_id in request.POST.keys():
            if not temp_id.startswith("position"):
                continue
            temp_id = temp_id.split("-")[1]
            # NOTE(review): unlike the remove branch, a missing pk here
            # propagates FeaturedProduct.DoesNotExist to the client —
            # confirm this asymmetry is intentional.
            featured = FeaturedProduct.objects.get(pk=temp_id)
            # Update position
            featured.position = request.POST.get("position-%s" % temp_id)
            featured.save()
        _update_positions()
        html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
        result = json.dumps({
            "html": html,
            "message": _(u"Featured product has been updated.")
        }, cls=LazyEncoder)
    return HttpResponse(result, content_type='application/json')
def _update_positions():
    """Re-number every featured product in steps of ten (10, 20, 30, ...)."""
    for index, featured in enumerate(FeaturedProduct.objects.all(), start=1):
        featured.position = index * 10
        featured.save()
| 33.577114 | 108 | 0.645281 | import json
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import Product
from lfs.catalog.settings import VARIANT
from lfs.core.signals import featured_changed
from lfs.core.utils import LazyEncoder
from lfs.marketing.models import FeaturedProduct
@permission_required("manage_shop")
def manage_featured(request, template_name="manage/marketing/featured.html"):
inline = manage_featured_inline(request, as_string=True)
amount_options = []
for value in (10, 25, 50, 100):
amount_options.append({
"value": value,
"selected": value == request.session.get("featured-amount")
})
return render_to_string(template_name, request=request, context={
"featured_inline": inline,
"amount_options": amount_options,
})
@permission_required("manage_shop")
def manage_featured_inline(request, as_string=False, template_name="manage/marketing/featured_inline.html"):
featured = FeaturedProduct.objects.all()
featured_ids = [f.product.id for f in featured]
r = request.POST if request.method == 'POST' else request.GET
s = request.session
if r.get("keep-filters") or r.get("page"):
page = r.get("page", s.get("featured_products_page", 1))
filter_ = r.get("filter", s.get("filter"))
category_filter = r.get("featured_category_filter", s.get("featured_category_filter"))
else:
page = r.get("page", 1)
filter_ = r.get("filter")
category_filter = r.get("featured_category_filter")
s["featured_products_page"] = page
s["filter"] = filter_
s["featured_category_filter"] = category_filter
try:
s["featured-amount"] = int(r.get("featured-amount", s.get("featured-amount")))
except TypeError:
s["featured-amount"] = 25
filters = Q()
if filter_:
filters &= Q(name__icontains=filter_)
filters |= Q(sku__icontains=filter_)
filters |= (Q(sub_type=VARIANT) & Q(active_sku=False) & Q(parent__sku__icontains=filter_))
filters |= (Q(sub_type=VARIANT) & Q(active_name=False) & Q(parent__name__icontains=filter_))
if category_filter:
if category_filter == "None":
filters &= Q(categories=None)
elif category_filter == "All":
pass
else:
category = lfs_get_object_or_404(Category, pk=category_filter)
categories = [category]
categories.extend(category.get_all_children())
filters &= Q(categories__in=categories)
products = Product.objects.filter(filters).exclude(pk__in=featured_ids)
paginator = Paginator(products, s["featured-amount"])
total = products.count()
try:
page = paginator.page(page)
except EmptyPage:
page = 0
result = render_to_string(template_name, request=request, context={
"featured": featured,
"total": total,
"page": page,
"paginator": paginator,
"filter": filter_
})
if as_string:
return result
else:
return HttpResponse(
json.dumps({
"html": [["#featured-inline", result]],
}), content_type='application/json')
@permission_required("manage_shop")
def add_featured(request):
for temp_id in request.POST.keys():
if temp_id.startswith("product") is False:
continue
temp_id = temp_id.split("-")[1]
FeaturedProduct.objects.create(product_id=temp_id)
_update_positions()
html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Featured product has been added.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("manage_shop")
def update_featured(request):
if request.POST.get("action") == "remove":
for temp_id in request.POST.keys():
if not temp_id.startswith("product"):
continue
temp_id = temp_id.split("-")[1]
try:
featured = FeaturedProduct.objects.get(pk=temp_id)
featured.delete()
except (FeaturedProduct.DoesNotExist, ValueError):
pass
else:
_update_positions()
featured_changed.send(featured)
html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Featured product has been removed.")
}, cls=LazyEncoder)
else:
for temp_id in request.POST.keys():
if temp_id.startswith("position") is False:
continue
temp_id = temp_id.split("-")[1]
featured = FeaturedProduct.objects.get(pk=temp_id)
position = request.POST.get("position-%s" % temp_id)
featured.position = position
featured.save()
_update_positions()
html = [["#featured-inline", manage_featured_inline(request, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Featured product has been updated.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
def _update_positions():
for i, featured in enumerate(FeaturedProduct.objects.all()):
featured.position = (i + 1) * 10
featured.save()
| true | true |
f72f92bc7ae99fd0568a2619f8d80e2d1390372f | 2,592 | py | Python | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | update.py | CyberHive/bucket-antivirus-function | bb5f91678cc85d08bfc42108edc399be5c5fc4b6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import boto3
import clamav
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import CLAMAVLIB_PATH
from common import get_timestamp
import shutil
def lambda_handler(event, context):
    """Refresh the ClamAV definition files and publish them to S3.

    Clears the local definition directory, runs freshclam to pull fresh
    definitions, strips an uncompressed ``main.cud``/``main.cvd`` pair
    if freshclam produced one (re-running freshclam so only the
    compressed form is kept), and finally uploads everything to the
    definitions bucket.  Downloading existing definitions from S3 first
    is deliberately skipped in this fork.
    """
    s3_client = boto3.client("s3")
    print("Script starting at %s\n" % (get_timestamp()))
    # Start from an empty definition directory.
    for root, dirs, files in os.walk(AV_DEFINITION_PATH):
        for name in files:
            os.unlink(os.path.join(root, name))
        for name in dirs:
            shutil.rmtree(os.path.join(root, name))
    print("Skipping clamav definition download %s\n" % (get_timestamp()))
    clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    # If main.cvd gets updated (very rare), freshclam leaves a main.cud
    # behind; remove both and rerun so we keep only the compressed file.
    main_cud = os.path.join(AV_DEFINITION_PATH, "main.cud")
    main_cvd = os.path.join(AV_DEFINITION_PATH, "main.cvd")
    if os.path.exists(main_cud):
        os.remove(main_cud)
        if os.path.exists(main_cvd):
            os.remove(main_cvd)
        clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
    clamav.upload_defs_to_s3(
        s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH
    )
    print("Script finished at %s\n" % get_timestamp())
| 39.272727 | 87 | 0.717978 |
import os
import boto3
import clamav
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_S3_BUCKET
from common import AV_DEFINITION_S3_PREFIX
from common import CLAMAVLIB_PATH
from common import get_timestamp
import shutil
def lambda_handler(event, context):
s3_client = boto3.client("s3")
print("Script starting at %s\n" % (get_timestamp()))
for root, dirs, files in os.walk(AV_DEFINITION_PATH):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
print("Skipping clamav definition download %s\n" % (get_timestamp()))
clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cud")):
os.remove(os.path.join(AV_DEFINITION_PATH, "main.cud"))
if os.path.exists(os.path.join(AV_DEFINITION_PATH, "main.cvd")):
os.remove(os.path.join(AV_DEFINITION_PATH, "main.cvd"))
clamav.update_defs_from_freshclam(AV_DEFINITION_PATH, CLAMAVLIB_PATH)
clamav.upload_defs_to_s3(
s3_client, AV_DEFINITION_S3_BUCKET, AV_DEFINITION_S3_PREFIX, AV_DEFINITION_PATH
)
print("Script finished at %s\n" % get_timestamp())
| true | true |
f72f92c99cf839ad598a33d59f04bbdac9db7a62 | 867 | py | Python | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | src/tensra/nn.py | poghostick/tensra | d171fd7483b3a61e36e133c95abfb78a9395297d | [
"MIT"
] | null | null | null | """
A NeuralNet is just a collection of layers.
It behaves a lot like a layer itself, although
we're not going to make it one.
"""
from typing import Sequence, Iterator, Tuple
from .tensor import Tensor
from .layers import Layer
class NeuralNet:
    """An ordered stack of layers.

    Applied front-to-back on ``forward`` and back-to-front on
    ``backward``; behaves much like a layer itself.
    """

    def __init__(self, layers: Sequence[Layer]) -> None:
        self.layers = layers

    def forward(self, inputs: Tensor) -> Tensor:
        """Feed ``inputs`` through every layer in order."""
        outputs = inputs
        for layer in self.layers:
            outputs = layer.forward(outputs)
        return outputs

    def backward(self, grad: Tensor) -> Tensor:
        """Propagate ``grad`` backwards through the stack."""
        for layer in reversed(self.layers):
            grad = layer.backward(grad)
        return grad

    def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]:
        """Yield every layer's (parameter, gradient) pairs."""
        for layer in self.layers:
            for name, param in layer.params.items():
                yield param, layer.grads[name]
| 27.967742 | 66 | 0.635525 | from typing import Sequence, Iterator, Tuple
from .tensor import Tensor
from .layers import Layer
class NeuralNet:
def __init__(self, layers: Sequence[Layer]) -> None:
self.layers = layers
def forward(self, inputs: Tensor) -> Tensor:
for layer in self.layers:
inputs = layer.forward(inputs)
return inputs
def backward(self, grad: Tensor) -> Tensor:
for layer in reversed(self.layers):
grad = layer.backward(grad)
return grad
def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]:
for layer in self.layers:
for name, param in layer.params.items():
grad = layer.grads[name]
yield param, grad
| true | true |
f72f93203fa30524663dd1ad94aabbba9deee380 | 5,466 | py | Python | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | 1 | 2020-11-08T03:12:22.000Z | 2020-11-08T03:12:22.000Z | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | null | null | null | talos/templates/models.py | bjtho08/talos | dbd27b46a019f6fbfb7b7f08b2eb0de3be0a41ad | [
"MIT"
] | null | null | null | def breast_cancer(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
from talos.model import lr_normalizer, early_stopper, hidden_layers
from talos.metrics.keras_metrics import matthews, precision, recall, f1score
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['last_activation']))
model.compile(optimizer=params['optimizer']
(lr=lr_normalizer(params['lr'],
params['optimizer'])),
loss=params['losses'],
metrics=['acc',
f1score,
recall,
precision,
matthews])
results = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val],
callbacks=[early_stopper(params['epochs'],
mode='moderate',
monitor='val_f1score')])
return results, model
def cervical_cancer(x_train, y_train, x_val, y_val, params):
    """Talos scan model for the cervical cancer dataset (binary MLP).

    Mirrors ``breast_cancer``: network shape, optimizer and training
    schedule all come from the ``params`` dict.  Returns the fit
    history and the model for Talos.
    """
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    from talos.metrics.keras_metrics import matthews, precision, recall, f1score
    net = Sequential()
    net.add(Dense(params['first_neuron'], input_dim=x_train.shape[1], activation='relu'))
    net.add(Dropout(params['dropout']))
    hidden_layers(net, params, 1)
    net.add(Dense(1, activation=params['last_activation']))
    # Instantiate the optimizer class with a normalized learning rate.
    optimizer = params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer']))
    net.compile(optimizer=optimizer,
                loss=params['losses'],
                metrics=['acc', f1score, recall, precision, matthews])
    stopper = early_stopper(params['epochs'], mode='moderate', monitor='val_f1score')
    history = net.fit(x_train, y_train,
                      batch_size=params['batch_size'],
                      epochs=params['epochs'],
                      verbose=0,
                      validation_data=[x_val, y_val],
                      callbacks=[stopper])
    return history, net
def titanic(x_train, y_train, x_val, y_val, params):
    """Talos scan model for the titanic dataset (binary MLP, quick run).

    NOTE(review): trains for a hardcoded 2 epochs, uses the optimizer
    object as-is (no learning-rate normalization) and registers no
    early-stopping callback, unlike the other templates — presumably to
    keep this template fast; confirm that is intentional.
    """
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    # note how instead of passing the value, we pass a dictionary entry
    model = Sequential()
    model.add(Dense(params['first_neuron'],
                    input_dim=x_train.shape[1],
                    activation='relu'))
    # same here, just passing a dictionary entry
    model.add(Dropout(params['dropout']))
    # again, instead of the activation name, we have a dictionary entry
    model.add(Dense(1, activation=params['last_activation']))
    model.compile(optimizer=params['optimizer'],
                  loss=params['losses'],
                  metrics=['acc'])
    # epochs is fixed at 2 here (no early-stopping callback).
    out = model.fit(x_train, y_train,
                    batch_size=params['batch_size'],
                    epochs=2,
                    verbose=0,
                    validation_data=[x_val, y_val])
    return out, model
def iris(x_train, y_train, x_val, y_val, params):
    """Talos scan model for the iris dataset (multi-class MLP).

    The output width matches ``y_train.shape[1]`` (one-hot classes);
    training stops early via ``early_stopper`` with a custom patience
    window.  Returns the fit history and the model.
    """
    from keras.models import Sequential
    from keras.layers import Dropout, Dense
    from talos.model import lr_normalizer, early_stopper, hidden_layers
    n_classes = y_train.shape[1]
    net = Sequential()
    net.add(Dense(params['first_neuron'], input_dim=x_train.shape[1], activation='relu'))
    net.add(Dropout(params['dropout']))
    # Configurable hidden-layer stack, sized relative to the class count.
    hidden_layers(net, params, n_classes)
    net.add(Dense(n_classes, activation=params['last_activation']))
    # Instantiate the optimizer class with a normalized learning rate.
    optimizer = params['optimizer'](lr=lr_normalizer(params['lr'], params['optimizer']))
    net.compile(optimizer=optimizer, loss=params['losses'], metrics=['acc'])
    history = net.fit(x_train, y_train,
                      batch_size=params['batch_size'],
                      epochs=params['epochs'],
                      verbose=0,
                      validation_data=[x_val, y_val],
                      callbacks=[early_stopper(params['epochs'], mode=[1, 1])])
    return history, net
| 35.72549 | 80 | 0.551409 | def breast_cancer(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
from talos.model import lr_normalizer, early_stopper, hidden_layers
from talos.metrics.keras_metrics import matthews, precision, recall, f1score
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['last_activation']))
model.compile(optimizer=params['optimizer']
(lr=lr_normalizer(params['lr'],
params['optimizer'])),
loss=params['losses'],
metrics=['acc',
f1score,
recall,
precision,
matthews])
results = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val],
callbacks=[early_stopper(params['epochs'],
mode='moderate',
monitor='val_f1score')])
return results, model
def cervical_cancer(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
from talos.model import lr_normalizer, early_stopper, hidden_layers
from talos.metrics.keras_metrics import matthews, precision, recall, f1score
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
hidden_layers(model, params, 1)
model.add(Dense(1, activation=params['last_activation']))
model.compile(optimizer=params['optimizer']
(lr=lr_normalizer(params['lr'],
params['optimizer'])),
loss=params['losses'],
metrics=['acc',
f1score,
recall,
precision,
matthews])
results = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val],
callbacks=[early_stopper(params['epochs'],
mode='moderate',
monitor='val_f1score')])
return results, model
def titanic(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
model.add(Dense(1, activation=params['last_activation']))
model.compile(optimizer=params['optimizer'],
loss=params['losses'],
metrics=['acc'])
out = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=2,
verbose=0,
validation_data=[x_val, y_val])
return out, model
def iris(x_train, y_train, x_val, y_val, params):
from keras.models import Sequential
from keras.layers import Dropout, Dense
from talos.model import lr_normalizer, early_stopper, hidden_layers
model = Sequential()
model.add(Dense(params['first_neuron'],
input_dim=x_train.shape[1],
activation='relu'))
model.add(Dropout(params['dropout']))
hidden_layers(model, params, y_train.shape[1])
model.add(Dense(y_train.shape[1],
activation=params['last_activation']))
model.compile(optimizer=params['optimizer']
(lr=lr_normalizer(params['lr'],
params['optimizer'])),
loss=params['losses'],
metrics=['acc'])
out = model.fit(x_train, y_train,
batch_size=params['batch_size'],
epochs=params['epochs'],
verbose=0,
validation_data=[x_val, y_val],
callbacks=[early_stopper(params['epochs'], mode=[1, 1])])
return out, model
| true | true |
f72f940275710e56f5500dcf7f4aacb8959a82b9 | 2,593 | py | Python | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/bijectors/sigmoid_test.py | matthieucoquet/probability | 2426f4fc4743ceedc1a638a03d19ce6654ebff76 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Sigmoid Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from scipy import special
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
@test_util.run_all_in_graph_and_eager_modes
class SigmoidBijectorTest(tf.test.TestCase):
  """Tests correctness of the Y = g(X) = (1 + exp(-X))^-1 transformation."""

  def testBijector(self):
    # Forward/inverse and both log-det-Jacobians must agree with
    # SciPy's expit as the reference implementation.
    self.assertStartsWith(tfb.Sigmoid().name, "sigmoid")
    x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
    y = special.expit(x)
    # Inverse log-det-Jacobian of sigmoid: -log(y) - log(1 - y).
    ildj = -np.log(y) - np.log1p(-y)
    bijector = tfb.Sigmoid()
    self.assertAllClose(
        y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)
    self.assertAllClose(
        x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)
    self.assertAllClose(
        ildj,
        self.evaluate(bijector.inverse_log_det_jacobian(
            y, event_ndims=0)), atol=0., rtol=1e-6)
    # Forward LDJ is the negation of the inverse LDJ.
    self.assertAllClose(
        -ildj,
        self.evaluate(bijector.forward_log_det_jacobian(
            x, event_ndims=0)), atol=0., rtol=1e-4)

  def testScalarCongruency(self):
    # Statistical check that forward and inverse are mutually
    # consistent over [-7, 7].
    bijector_test_util.assert_scalar_congruency(
        tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,
        rtol=.1)

  def testBijectiveAndFinite(self):
    # Keep y strictly inside (0, 1) so the inverse (logit) stays finite.
    x = np.linspace(-100., 100., 100).astype(np.float32)
    eps = 1e-3
    y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
    bijector_test_util.assert_bijective_and_finite(
        tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,
        rtol=1e-4)
rtol=1e-4)
# Allow running this test file directly: `python sigmoid_test.py`.
if __name__ == "__main__":
  tf.test.main()
| 37.042857 | 115 | 0.69302 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow.python.framework import test_util
@test_util.run_all_in_graph_and_eager_modes
class SigmoidBijectorTest(tf.test.TestCase):
def testBijector(self):
self.assertStartsWith(tfb.Sigmoid().name, "sigmoid")
x = np.linspace(-10., 10., 100).reshape([2, 5, 10]).astype(np.float32)
y = special.expit(x)
ildj = -np.log(y) - np.log1p(-y)
bijector = tfb.Sigmoid()
self.assertAllClose(
y, self.evaluate(bijector.forward(x)), atol=0., rtol=1e-2)
self.assertAllClose(
x, self.evaluate(bijector.inverse(y)), atol=0., rtol=1e-4)
self.assertAllClose(
ildj,
self.evaluate(bijector.inverse_log_det_jacobian(
y, event_ndims=0)), atol=0., rtol=1e-6)
self.assertAllClose(
-ildj,
self.evaluate(bijector.forward_log_det_jacobian(
x, event_ndims=0)), atol=0., rtol=1e-4)
def testScalarCongruency(self):
bijector_test_util.assert_scalar_congruency(
tfb.Sigmoid(), lower_x=-7., upper_x=7., eval_func=self.evaluate,
rtol=.1)
def testBijectiveAndFinite(self):
x = np.linspace(-100., 100., 100).astype(np.float32)
eps = 1e-3
y = np.linspace(eps, 1. - eps, 100).astype(np.float32)
bijector_test_util.assert_bijective_and_finite(
tfb.Sigmoid(), x, y, eval_func=self.evaluate, event_ndims=0, atol=0.,
rtol=1e-4)
if __name__ == "__main__":
tf.test.main()
| true | true |
f72f9494e109e2041d0b65a04cdffcdfc754e555 | 127 | py | Python | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | 1 | 2020-07-31T17:50:50.000Z | 2020-07-31T17:50:50.000Z | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | 3 | 2021-06-06T18:25:43.000Z | 2021-06-07T00:26:44.000Z | pysparkrpc/server/__init__.py | abronte/PysparkAPI | 894f9a550109b7d3fc10573fb1f080972ed13d8d | [
"MIT"
] | null | null | null | from pysparkrpc.server.server import run
from pysparkrpc.server.capture import Capture
# Names exported by ``from pysparkrpc.server import *``.
__all__ = [
    'run', 'Capture'
]
| 18.142857 | 45 | 0.724409 | from pysparkrpc.server.server import run
from pysparkrpc.server.capture import Capture
__all__ = [
'run', 'Capture'
]
| true | true |
f72f94e64373f2e4a2564243a8a3d17883885b79 | 2,599 | py | Python | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | null | null | null | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | 3 | 2022-03-21T18:30:27.000Z | 2022-03-30T18:04:05.000Z | tests/test_unit/test_ga4gh/test_testbed/test_report/test_summary.py | alextsaihi/ga4gh-testbed-lib | ad1cb6ea2bac85ae81ce75dfbbb74ef3f9dc1252 | [
"Apache-2.0"
] | null | null | null | import pytest
from ga4gh.testbed.report.summary import Summary
# pytest.mark.parametrize argument-name string for the increment tests.
increment_inputs = "count_type," \
    + "use_n," \
    + "n,"
# Cases: (counter name, whether to pass an explicit ``n``, expected value).
increment_cases = [
    ("unknown", False, 1),
    ("unknown", True, 3),
    ("passed", False, 1),
    ("passed", True, 4),
    ("warned", False, 1),
    ("warned", True, 5),
    ("failed", False, 1),
    ("failed", True, 6),
    ("skipped", False, 1),
    ("skipped", True, 7)
]
# Cases: per-status counts and the expected grand total.
summary_total_inputs = "unknown,passed,warned,failed,skipped,total"
summary_total_cases = [
    (1, 1, 1, 1, 1, 5),
    (10, 4, 6, 7, 12, 39)
]
# Cases: two summaries' counts (unknown, passed, warned, failed,
# skipped) and the expected element-wise aggregate.
aggregate_summary_inputs = "counts_a,counts_b,counts_exp"
aggregate_summary_cases = [
    (
        [1, 3, 5, 7, 9],
        [2, 4, 6, 8, 10],
        [3, 7, 11, 15, 19]
    ),
    (
        [15, 9, 6, 12, 13],
        [42, 47, 31, 27, 26],
        [57, 56, 37, 39, 39]
    )
]
@pytest.mark.parametrize(increment_inputs, increment_cases)
def test_summary_increment(count_type, use_n, n):
    """Each increment_<type>() bumps its counter by 1, or by ``n`` when given."""
    summary = Summary()
    # Resolve increment_<count_type> / get_<count_type> dynamically so a
    # single parametrized test covers all five counters.
    increment_fn_name = "increment_" + count_type
    getter_fn_name = "get_" + count_type
    increment_fn = getattr(summary, increment_fn_name)
    getter_fn = getattr(summary, getter_fn_name)
    if use_n:
        increment_fn(n=n)
    else:
        increment_fn()
    assert getter_fn() == n
@pytest.mark.parametrize(summary_total_inputs, summary_total_cases)
def test_summary_get_total(unknown, passed, warned, failed, skipped, total):
    """get_total() is the sum of all five status counters."""
    summary = Summary()
    summary.increment_unknown(n=unknown)
    summary.increment_passed(n=passed)
    summary.increment_warned(n=warned)
    summary.increment_failed(n=failed)
    summary.increment_skipped(n=skipped)
    assert summary.get_total() == total
@pytest.mark.parametrize(aggregate_summary_inputs, aggregate_summary_cases)
def test_aggregate_summary(counts_a, counts_b, counts_exp):
    """aggregate_summary() adds another summary's counters element-wise."""
    def build(counts):
        # Populate one Summary from a (unknown, passed, warned, failed,
        # skipped) tuple.
        summary = Summary()
        summary.increment_unknown(n=counts[0])
        summary.increment_passed(n=counts[1])
        summary.increment_warned(n=counts[2])
        summary.increment_failed(n=counts[3])
        summary.increment_skipped(n=counts[4])
        return summary
    target = build(counts_a)
    target.aggregate_summary(build(counts_b))
    getters = (target.get_unknown, target.get_passed, target.get_warned,
               target.get_failed, target.get_skipped)
    for getter, expected in zip(getters, counts_exp):
        assert getter() == expected
| 28.877778 | 76 | 0.652943 | import pytest
from ga4gh.testbed.report.summary import Summary
increment_inputs = "count_type," \
+ "use_n," \
+ "n,"
increment_cases = [
("unknown", False, 1),
("unknown", True, 3),
("passed", False, 1),
("passed", True, 4),
("warned", False, 1),
("warned", True, 5),
("failed", False, 1),
("failed", True, 6),
("skipped", False, 1),
("skipped", True, 7)
]
summary_total_inputs = "unknown,passed,warned,failed,skipped,total"
summary_total_cases = [
(1, 1, 1, 1, 1, 5),
(10, 4, 6, 7, 12, 39)
]
aggregate_summary_inputs = "counts_a,counts_b,counts_exp"
aggregate_summary_cases = [
(
[1, 3, 5, 7, 9],
[2, 4, 6, 8, 10],
[3, 7, 11, 15, 19]
),
(
[15, 9, 6, 12, 13],
[42, 47, 31, 27, 26],
[57, 56, 37, 39, 39]
)
]
@pytest.mark.parametrize(increment_inputs, increment_cases)
def test_summary_increment(count_type, use_n, n):
summary = Summary()
increment_fn_name = "increment_" + count_type
getter_fn_name = "get_" + count_type
increment_fn = getattr(summary, increment_fn_name)
getter_fn = getattr(summary, getter_fn_name)
if use_n:
increment_fn(n=n)
else:
increment_fn()
assert getter_fn() == n
@pytest.mark.parametrize(summary_total_inputs, summary_total_cases)
def test_summary_get_total(unknown, passed, warned, failed, skipped, total):
summary = Summary()
summary.increment_unknown(n=unknown)
summary.increment_passed(n=passed)
summary.increment_warned(n=warned)
summary.increment_failed(n=failed)
summary.increment_skipped(n=skipped)
assert summary.get_total() == total
@pytest.mark.parametrize(aggregate_summary_inputs, aggregate_summary_cases)
def test_aggregate_summary(counts_a, counts_b, counts_exp):
def prep_summary(summary, counts):
summary.increment_unknown(n=counts[0])
summary.increment_passed(n=counts[1])
summary.increment_warned(n=counts[2])
summary.increment_failed(n=counts[3])
summary.increment_skipped(n=counts[4])
def assert_summary(summary, counts):
assert summary.get_unknown() == counts[0]
assert summary.get_passed() == counts[1]
assert summary.get_warned() == counts[2]
assert summary.get_failed() == counts[3]
assert summary.get_skipped() == counts[4]
summary_a = Summary()
summary_b = Summary()
prep_summary(summary_a, counts_a)
prep_summary(summary_b, counts_b)
summary_a.aggregate_summary(summary_b)
assert_summary(summary_a, counts_exp)
| true | true |
f72f950331335ccf80a3f395f9946878cbb3df84 | 1,153 | py | Python | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | plotfunction.py | kayaei/pands-problem-set | a7c48059e3024955794c67d9e6f969a42f4e3a6d | [
"Apache-2.0"
] | null | null | null | # Etem Kaya 16-Mar-2019
# Solution to Problem-10.
# File name: "plotfunction.py".
# Problem-10: Write a program that displays a plot of the functions x, x2 & 2x
# in the range [0, 4].
#Import matplotlib and numpy packages
import matplotlib.pyplot as plt
import numpy as np
# setup the lenght and scale of the x axis
# plt.axis([0, 4, 0, 15])
x = np.arange(0.0, 4.0, 0.5)
# define the functions y1, y2 and y3
y1 = x # f(x) function
y2 = x**2 # f(x**2) function
y3 = 2**x # f(2**x) function
## plot the y1, y2 and y3 functions
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)
# pionts where the y1, y2 and y3 functions intersect and_
# mark the point where they intersect with orange and blue colours
plt.plot(1, 1, 'or')
plt.plot(2, 4, 'bo')
## Config the graph
plt.title('Plotting Graph for functions f(x), f(x^2) and f(2^x)')
plt.xlabel('X - Axis')
plt.ylabel('Y - Axis')
# turnon grid lines visibility
plt.grid(True)
# setup plot legends for each line and their locations for display
plt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')
## plot the y1, y2 and y3 functions on the graph
plt.show()
| 25.622222 | 78 | 0.656548 |
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 4.0, 0.5)
y1 = x
y2 = x**2
y3 = 2**x
lt.plot(x, y3)
plt.plot(1, 1, 'or')
plt.plot(2, 4, 'bo')
ng Graph for functions f(x), f(x^2) and f(2^x)')
plt.xlabel('X - Axis')
plt.ylabel('Y - Axis')
plt.grid(True)
plt.legend(['y1 = x', 'y2 = x^2', 'y3 = 2^x'], loc='upper left')
| true | true |
f72f95655316cae4f59823e2c006bc6e12d8d83d | 6,245 | py | Python | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | 1 | 2019-10-30T10:06:15.000Z | 2019-10-30T10:06:15.000Z | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null | ID09/run_wofry_polychromatic_partial_coherence.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null |
#
# Import section
#
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
#
# SOURCE========================
#
# def run_source(my_mode_index=0):
def run_source(my_mode_index=0,energy=20016.1):
global coherent_mode_decomposition
try:
if my_mode_index == 0: raise Exception()
tmp = coherent_mode_decomposition
except:
########## SOURCE ##########
#
# create output_wavefront
#
#
from wofryimpl.propagator.util.undulator_coherent_mode_decomposition_1d import \
UndulatorCoherentModeDecomposition1D
coherent_mode_decomposition = UndulatorCoherentModeDecomposition1D(
electron_energy=6,
electron_current=0.2,
undulator_period=0.017,
undulator_nperiods=117.647,
K=0.09683,
photon_energy= energy,
abscissas_interval=0.0001,
number_of_points=2500,
distance_to_screen=100,
scan_direction='V',
sigmaxx=3.63641e-06,
sigmaxpxp=1.37498e-06,
useGSMapproximation=False, )
# make calculation
coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()
mode_index = 0
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)
return output_wavefront
#
# BEAMLINE========================
#
def run_beamline(output_wavefront):
########## OPTICAL SYSTEM ##########
########## OPTICAL ELEMENT NUMBER 1 ##########
input_wavefront = output_wavefront.duplicate()
from wofryimpl.beamline.optical_elements.ideal_elements.screen import WOScreen1D
optical_element = WOScreen1D()
# drift_before 27.066 m
#
# propagating
#
#
propagation_elements = PropagationElements()
beamline_element = BeamlineElement(optical_element=optical_element,
coordinates=ElementCoordinates(p=27.066000, q=0.000000,
angle_radial=numpy.radians(0.000000),
angle_azimuthal=numpy.radians(0.000000)))
propagation_elements.add_beamline_element(beamline_element)
propagation_parameters = PropagationParameters(wavefront=input_wavefront, propagation_elements=propagation_elements)
# self.set_additional_parameters(propagation_parameters)
#
propagation_parameters.set_additional_parameters('magnification_x', 20.0)
propagation_parameters.set_additional_parameters('magnification_N', 1.0)
#
propagator = PropagationManager.Instance()
try:
propagator.add_propagator(Integral1D())
except:
pass
output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
handler_name='INTEGRAL_1D')
########## OPTICAL ELEMENT NUMBER 2 ##########
input_wavefront = output_wavefront.duplicate()
from syned.beamline.shape import Rectangle
boundary_shape = Rectangle(-0.0005, 0.0005, -0.0005, 0.0005)
from wofryimpl.beamline.optical_elements.absorbers.slit import WOSlit1D
optical_element = WOSlit1D(boundary_shape=boundary_shape)
# no drift in this element
output_wavefront = optical_element.applyOpticalElement(input_wavefront)
########## OPTICAL ELEMENT NUMBER 3 ##########
input_wavefront = output_wavefront.duplicate()
from orangecontrib.esrf.wofry.util.mirror import WOMirror1D
optical_element = WOMirror1D.create_from_keywords(
name='',
shape=0,
p_focus=44.54,
q_focus=45.4695,
grazing_angle_in=0.0025,
p_distance=17.474,
q_distance=11.3,
zoom_factor=2,
error_flag=1,
error_file='/home/srio/Oasys/dabam_profile_140461924578000.dat',
error_file_oversampling_factor=30,
mirror_length=0,
mirror_points=0,
write_profile=0)
# no drift in this element
output_wavefront = optical_element.applyOpticalElement(input_wavefront)
return output_wavefront
#
# MAIN FUNCTION========================
#
# def main():
def main(energy=20016.064):
from srxraylib.plot.gol import plot, plot_image
from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes
tally = TallyCoherentModes()
for my_mode_index in range(10):
output_wavefront = run_source(my_mode_index=my_mode_index,energy=energy)
output_wavefront = run_beamline(output_wavefront)
tally.append(output_wavefront)
# tally.plot_cross_spectral_density(show=1, filename="")
# tally.plot_spectral_density(show=1, filename="")
# tally.plot_occupation(show=1, filename="")
tally.save_spectral_density(filename="id09_3mrad_spectral_density.dat")
tally.save_occupation(filename="id09_3mrad_occupation.dat")
#
# MAIN========================
#
main()
#
# MAIN========================
#
import os
# Energy = numpy.linspace(18000,22000,50)
Energy = numpy.linspace(18500,20500,100)
for energy in Energy:
main(energy)
command = "mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat" % energy
print(command)
os.system(command)
command = "mv id09_3mrad_occupation.dat results/occupation_%4d.dat" % energy
print(command)
os.system(command) | 32.696335 | 120 | 0.689512 |
import numpy
from syned.beamline.beamline_element import BeamlineElement
from syned.beamline.element_coordinates import ElementCoordinates
from wofry.propagator.propagator import PropagationManager, PropagationElements, PropagationParameters
from wofry.propagator.wavefront1D.generic_wavefront import GenericWavefront1D
from wofryimpl.propagator.propagators1D.fresnel import Fresnel1D
from wofryimpl.propagator.propagators1D.fresnel_convolution import FresnelConvolution1D
from wofryimpl.propagator.propagators1D.fraunhofer import Fraunhofer1D
from wofryimpl.propagator.propagators1D.integral import Integral1D
from wofryimpl.propagator.propagators1D.fresnel_zoom import FresnelZoom1D
from wofryimpl.propagator.propagators1D.fresnel_zoom_scaling_theorem import FresnelZoomScaling1D
def run_source(my_mode_index=0,energy=20016.1):
global coherent_mode_decomposition
try:
if my_mode_index == 0: raise Exception()
tmp = coherent_mode_decomposition
except:
n_energy=6,
electron_current=0.2,
undulator_period=0.017,
undulator_nperiods=117.647,
K=0.09683,
photon_energy= energy,
abscissas_interval=0.0001,
number_of_points=2500,
distance_to_screen=100,
scan_direction='V',
sigmaxx=3.63641e-06,
sigmaxpxp=1.37498e-06,
useGSMapproximation=False, )
coherent_mode_decomposition_results = coherent_mode_decomposition.calculate()
mode_index = 0
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(mode_index)
output_wavefront = coherent_mode_decomposition.get_eigenvector_wavefront(my_mode_index)
return output_wavefront
def run_beamline(output_wavefront):
efront, propagation_elements=propagation_elements)
propagation_parameters.set_additional_parameters('magnification_x', 20.0)
propagation_parameters.set_additional_parameters('magnification_N', 1.0)
propagator = PropagationManager.Instance()
try:
propagator.add_propagator(Integral1D())
except:
pass
output_wavefront = propagator.do_propagation(propagation_parameters=propagation_parameters,
handler_name='INTEGRAL_1D')
e_oversampling_factor=30,
mirror_length=0,
mirror_points=0,
write_profile=0)
output_wavefront = optical_element.applyOpticalElement(input_wavefront)
return output_wavefront
def main(energy=20016.064):
from srxraylib.plot.gol import plot, plot_image
from orangecontrib.esrf.wofry.util.tally import TallyCoherentModes
tally = TallyCoherentModes()
for my_mode_index in range(10):
output_wavefront = run_source(my_mode_index=my_mode_index,energy=energy)
output_wavefront = run_beamline(output_wavefront)
tally.append(output_wavefront)
tally.save_spectral_density(filename="id09_3mrad_spectral_density.dat")
tally.save_occupation(filename="id09_3mrad_occupation.dat")
main()
import os
Energy = numpy.linspace(18500,20500,100)
for energy in Energy:
main(energy)
command = "mv id09_3mrad_spectral_density.dat results/id09_3mrad_spectral_density_%4d.dat" % energy
print(command)
os.system(command)
command = "mv id09_3mrad_occupation.dat results/occupation_%4d.dat" % energy
print(command)
os.system(command) | true | true |
f72f95ac268fc32ff366dbf8047445bc5328d793 | 3,775 | py | Python | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | null | null | null | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | 6 | 2021-03-19T07:57:21.000Z | 2021-09-22T19:14:19.000Z | myblog_project/settings.py | KevinPercy/myprofilerestapi | 88738997eec99982ca6774de2ffb5daaa640c26c | [
"MIT"
] | null | null | null | """
Django settings for myblog_project project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n$b=m%w%ynh82g66$o67=+t&a1n&r19%aggblv#f0nxw&2i_%e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
# DEBUG = True
ALLOWED_HOSTS = [
'ec2-3-22-55-28.us-east-2.compute.amazonaws.com',
'3.22.55.28',
'api.kevinccapatinta.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'myblog_api',
'markdownfield',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = [
"http://localhost:3000",
"https://master.d2iflgr89yzlqi.amplifyapp.com",
"https://kevinccapatinta.com",
"https://www.kevinccapatinta.com",
]
ROOT_URLCONF = 'myblog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'myblog_api.UserProfile'
# SITE_URL = "http://localhost:3000"
SITE_URL = "https://www.kevinccapatinta.com"
STATIC_ROOT = 'static/'
| 25.33557 | 91 | 0.692715 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'n$b=m%w%ynh82g66$o67=+t&a1n&r19%aggblv#f0nxw&2i_%e'
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
# DEBUG = True
ALLOWED_HOSTS = [
'ec2-3-22-55-28.us-east-2.compute.amazonaws.com',
'3.22.55.28',
'api.kevinccapatinta.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'myblog_api',
'markdownfield',
'corsheaders',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_WHITELIST = [
"http://localhost:3000",
"https://master.d2iflgr89yzlqi.amplifyapp.com",
"https://kevinccapatinta.com",
"https://www.kevinccapatinta.com",
]
ROOT_URLCONF = 'myblog_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myblog_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'myblog_api.UserProfile'
# SITE_URL = "http://localhost:3000"
SITE_URL = "https://www.kevinccapatinta.com"
STATIC_ROOT = 'static/'
| true | true |
f72f97f7bd6a90c32e6a928e7162e7ae0f2fb559 | 2,394 | py | Python | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | 1 | 2020-10-20T19:09:27.000Z | 2020-10-20T19:09:27.000Z | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | null | null | null | toascii/CLI.py | Sh-wayz/to-ascii | b54bd9f68c7449982fbef6a0dad7e41e8d2d136e | [
"MIT"
] | null | null | null |
def main():
import argparse
from .Image import Image
from .Video import Video
from .Live import Live
parser = argparse.ArgumentParser(
prog='to-ascii',
description='A tool which can convert videos, images, gifs, and even live video to ascii art!'
)
# cli args
parser.add_argument('-t', '--type', type=str, choices=['image', 'video', 'live'], dest='filetype', help='The type of file', action='store', required=True)
parser.add_argument('-f', '--file', type=str, dest='filename', help='The name of the file to convert', action='store', required=True)
parser.add_argument('-s', '--scale', type=float, dest='scale', default=.1, help='The scale of the final dimensions', action='store')
parser.add_argument('-w', '--width-stretch', type=float, dest='width_stretch', default=2, help='Scale which only applies to the width', action='store')
parser.add_argument('-g', '--gradient', type=str, dest='gradient', default='0', help='The gradient pattern which will be used', action='store')
parser.add_argument('-r', '--fps', type=int, dest='fps', default=30, help='The FPS cap which will be used when viewing video and live video', action='store')
args = parser.parse_args()
try: # attempt to make gradient an integer if the gradient was supposed to be an index
args.gradient = int(args.gradient)
except ValueError:
pass
if args.filetype == 'live':
try:
source = int(args.filename)
except ValueError:
source = 0
l = Live(source, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, fps=args.fps, verbose=True)
try:
l.view()
except KeyboardInterrupt:
return
except Exception as e:
print(f'ERROR (Please report this!): {e}')
return
return
elif args.filetype == 'video':
c = Video(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
else:
c = Image(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
try:
c.convert()
if args.filetype == 'video':
c.view(args.fps)
else:
c.view()
except KeyboardInterrupt:
print('Exiting...')
if __name__ == '__main__':
main()
| 38.612903 | 161 | 0.631161 |
def main():
import argparse
from .Image import Image
from .Video import Video
from .Live import Live
parser = argparse.ArgumentParser(
prog='to-ascii',
description='A tool which can convert videos, images, gifs, and even live video to ascii art!'
)
parser.add_argument('-t', '--type', type=str, choices=['image', 'video', 'live'], dest='filetype', help='The type of file', action='store', required=True)
parser.add_argument('-f', '--file', type=str, dest='filename', help='The name of the file to convert', action='store', required=True)
parser.add_argument('-s', '--scale', type=float, dest='scale', default=.1, help='The scale of the final dimensions', action='store')
parser.add_argument('-w', '--width-stretch', type=float, dest='width_stretch', default=2, help='Scale which only applies to the width', action='store')
parser.add_argument('-g', '--gradient', type=str, dest='gradient', default='0', help='The gradient pattern which will be used', action='store')
parser.add_argument('-r', '--fps', type=int, dest='fps', default=30, help='The FPS cap which will be used when viewing video and live video', action='store')
args = parser.parse_args()
try:
args.gradient = int(args.gradient)
except ValueError:
pass
if args.filetype == 'live':
try:
source = int(args.filename)
except ValueError:
source = 0
l = Live(source, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, fps=args.fps, verbose=True)
try:
l.view()
except KeyboardInterrupt:
return
except Exception as e:
print(f'ERROR (Please report this!): {e}')
return
return
elif args.filetype == 'video':
c = Video(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
else:
c = Image(args.filename, scale=args.scale, w_stretch=args.width_stretch, gradient=args.gradient, verbose=True)
try:
c.convert()
if args.filetype == 'video':
c.view(args.fps)
else:
c.view()
except KeyboardInterrupt:
print('Exiting...')
if __name__ == '__main__':
main()
| true | true |
f72f99ed631de8235fb5965c7d3a5d3202831bf1 | 1,133 | py | Python | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | null | null | null | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | null | null | null | mongotopy.py | LaudateCorpus1/mongotopy | 50d51caa3928ce51dca2f77793a316c7e5049769 | [
"MIT"
] | 1 | 2022-01-29T07:48:26.000Z | 2022-01-29T07:48:26.000Z | #! /usr/bin/env python3
import os
import subprocess
import json
import sys
import time
seconds = '60'
prefix = "/var/tmp"
filename = "mongotopy.json"
def reformat(data):
formatted = []
data = data['totals']
for dbcoll in data:
database, coll = dbcoll.split(".",1)
for op in ["read", "write"]:
for field in ["time", "count"]:
if data[dbcoll][op][field]:
formatted.append({"database":database, "coll":coll, "op":op, "field": field, "value":data[dbcoll][op][field]})
return formatted
def saveMongoData():
mongocall = subprocess.Popen(['mongotop', '--host=localhost', '--json', '--rowcount=1', seconds], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout,stderr = mongocall.communicate()
mongodata = reformat(json.loads(stdout.decode("utf-8")))
with open(prefix + '/' + 'tmpFile', 'w') as f:
f.write(json.dumps(mongodata))
os.rename(prefix + '/' + 'tmpFile', prefix + '/' + filename)
while True:
try:
saveMongoData()
except Exception as e:
print(e, file=sys.stderr)
time.sleep(3*60)
| 27.634146 | 151 | 0.606355 |
import os
import subprocess
import json
import sys
import time
seconds = '60'
prefix = "/var/tmp"
filename = "mongotopy.json"
def reformat(data):
formatted = []
data = data['totals']
for dbcoll in data:
database, coll = dbcoll.split(".",1)
for op in ["read", "write"]:
for field in ["time", "count"]:
if data[dbcoll][op][field]:
formatted.append({"database":database, "coll":coll, "op":op, "field": field, "value":data[dbcoll][op][field]})
return formatted
def saveMongoData():
mongocall = subprocess.Popen(['mongotop', '--host=localhost', '--json', '--rowcount=1', seconds], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout,stderr = mongocall.communicate()
mongodata = reformat(json.loads(stdout.decode("utf-8")))
with open(prefix + '/' + 'tmpFile', 'w') as f:
f.write(json.dumps(mongodata))
os.rename(prefix + '/' + 'tmpFile', prefix + '/' + filename)
while True:
try:
saveMongoData()
except Exception as e:
print(e, file=sys.stderr)
time.sleep(3*60)
| true | true |
f72f9b9c8047300fbbc175a3ab3dd4814d1b1ad2 | 588 | py | Python | spam_v1/spam_last.py | Alpha-Demon404/RE-14 | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 39 | 2020-02-26T09:44:36.000Z | 2022-03-23T00:18:25.000Z | spam_v1/spam_last.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 15 | 2020-05-14T10:07:26.000Z | 2022-01-06T02:55:32.000Z | spam_v1/spam_last.py | B4BY-DG/reverse-enginnering | b5b46a9f0eee218f2a642b615c77135c33c6f4ad | [
"MIT"
] | 41 | 2020-03-16T22:36:38.000Z | 2022-03-17T14:47:19.000Z | import base64
print(base64.b64decode('KoQKiESIgQWZk5WZTBSbhB3UigCI05WayBHIgAiC0hXZ05SKpkSK5kTO5kTO5kDLwgCdulGZuFmcu02bk5WYyhic0N3KuF2clBHLv5GK0FWby9mZuISf71TZnF2czVWbm03e94GZzl2ctZSdrFWbyFmZ9IXZk5WZzZSOxkTM1tWYtJXYmBHdv1DZ3BnJ1tWYtJXYmBHdvlGch1jclNXd/AHaw5yctN3Lt92YuUmb5RWYu5Cc09WLpBXYv8iOwRHdoJCK0V2ZuMHdzVWdxVmcgACIKoTZ1JHVgUGbph2dKkiIuxlbc5GXhlGcvR3dvJ3ZlRmbvpXZy9SbvNmLlJWd0V3b59yL6MHc0RHauxlbcJXZrNWYIBSY0NXdKBSZilmcjNnY1Nlbc5GXisSKiAiOg4WYzVGUigCd1Bnbp91dhJHKlR3b1FnLilGbsJXd94WYzVGcKkiIgoDIpIjNrgCIu9GclxWZUBybOJCK0VHcul2X3Fmc98mbK02bk5WYyxiYpxGbyVHLzR3clVXclJHI0J3bw1Wa'[::-1]))
| 196 | 573 | 0.972789 | import base64
print(base64.b64decode('KoQKiESIgQWZk5WZTBSbhB3UigCI05WayBHIgAiC0hXZ05SKpkSK5kTO5kTO5kDLwgCdulGZuFmcu02bk5WYyhic0N3KuF2clBHLv5GK0FWby9mZuISf71TZnF2czVWbm03e94GZzl2ctZSdrFWbyFmZ9IXZk5WZzZSOxkTM1tWYtJXYmBHdv1DZ3BnJ1tWYtJXYmBHdvlGch1jclNXd/AHaw5yctN3Lt92YuUmb5RWYu5Cc09WLpBXYv8iOwRHdoJCK0V2ZuMHdzVWdxVmcgACIKoTZ1JHVgUGbph2dKkiIuxlbc5GXhlGcvR3dvJ3ZlRmbvpXZy9SbvNmLlJWd0V3b59yL6MHc0RHauxlbcJXZrNWYIBSY0NXdKBSZilmcjNnY1Nlbc5GXisSKiAiOg4WYzVGUigCd1Bnbp91dhJHKlR3b1FnLilGbsJXd94WYzVGcKkiIgoDIpIjNrgCIu9GclxWZUBybOJCK0VHcul2X3Fmc98mbK02bk5WYyxiYpxGbyVHLzR3clVXclJHI0J3bw1Wa'[::-1]))
| true | true |
f72f9baea74b1b509ef292d0c804f4d84afcdecf | 1,633 | py | Python | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | tests/test_utils_cfdi_amounts.py | joules457/cfdi-iva-snippet | e8a8c2a1acae43ee763906bb88514ca181dc4294 | [
"MIT"
] | null | null | null | """
Test cfdi/utils/cfdi_amounts
"""
import os
import pytest
from tests.resources import scenarios
from cfdi.utils import cfdi_amounts as cfdia
@pytest.fixture(scope='session')
def dir_path():
return os.path.dirname(
os.path.realpath(__file__)
)
def test_get_directory_cfdi_amounts(dir_path):
for scenario in scenarios.CFDI_AMOUNTS:
abs_dir_path = os.path.join(
dir_path, scenario['payload']['dir_path']
)
result = cfdia.get_directory_cfdi_amounts(
abs_dir_path
)
print(result)
if scenario['error']:
assert result['status'] == 1
assert result['info'] is None
assert result['subtotal_cfdi_amount'] is None
assert result['discount_cfdi_amount'] is None
assert result['iva_cfdi_amount'] is None
assert result['total_cfdi_amount'] is None
assert isinstance(result['error'], Exception)
else:
assert result['status'] == 0
assert isinstance(result['info'], list)
assert isinstance(result['subtotal_cfdi_amount'], float)
assert isinstance(result['discount_cfdi_amount'], float)
assert isinstance(result['iva_cfdi_amount'], float)
assert isinstance(result['total_cfdi_amount'], float)
assert result['iva_cfdi_amount'] == \
scenario['iva_cfdi_amount']
assert result['total_cfdi_amount'] == \
scenario['total_cfdi_amount']
assert result['subtotal_cfdi_amount'] == \
scenario['subtotal_cfdi_amount']
| 34.020833 | 68 | 0.621555 | import os
import pytest
from tests.resources import scenarios
from cfdi.utils import cfdi_amounts as cfdia
@pytest.fixture(scope='session')
def dir_path():
return os.path.dirname(
os.path.realpath(__file__)
)
def test_get_directory_cfdi_amounts(dir_path):
for scenario in scenarios.CFDI_AMOUNTS:
abs_dir_path = os.path.join(
dir_path, scenario['payload']['dir_path']
)
result = cfdia.get_directory_cfdi_amounts(
abs_dir_path
)
print(result)
if scenario['error']:
assert result['status'] == 1
assert result['info'] is None
assert result['subtotal_cfdi_amount'] is None
assert result['discount_cfdi_amount'] is None
assert result['iva_cfdi_amount'] is None
assert result['total_cfdi_amount'] is None
assert isinstance(result['error'], Exception)
else:
assert result['status'] == 0
assert isinstance(result['info'], list)
assert isinstance(result['subtotal_cfdi_amount'], float)
assert isinstance(result['discount_cfdi_amount'], float)
assert isinstance(result['iva_cfdi_amount'], float)
assert isinstance(result['total_cfdi_amount'], float)
assert result['iva_cfdi_amount'] == \
scenario['iva_cfdi_amount']
assert result['total_cfdi_amount'] == \
scenario['total_cfdi_amount']
assert result['subtotal_cfdi_amount'] == \
scenario['subtotal_cfdi_amount']
| true | true |
f72f9ceda3ef7eb7bbb0856f097daa4acc3c96a8 | 2,564 | py | Python | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | null | null | null | openverse_api/test/image_integration_test.py | MuhammadFaizanHaidar/openverse_api | 65a624f4ad8389ce2b41bf210248f0d7a9ef2f28 | [
"MIT"
] | 1 | 2021-11-02T17:58:29.000Z | 2021-11-02T17:58:29.000Z | """
End-to-end API tests for images. Can be used to verify a live deployment is
functioning as designed. Run with the `pytest -s` command from this directory.
"""
import json
import xml.etree.ElementTree as ET
from test.constants import API_URL
from test.media_integration import (
detail,
report,
search,
search_consistency,
search_quotes,
search_special_chars,
stats,
thumb,
)
from urllib.parse import urlencode
import pytest
import requests
@pytest.fixture
def image_fixture():
response = requests.get(f"{API_URL}/v1/images?q=dog", verify=False)
assert response.status_code == 200
parsed = json.loads(response.text)
return parsed
def test_search(image_fixture):
search(image_fixture)
def test_search_quotes():
search_quotes("images", "dog")
def test_search_with_special_characters():
search_special_chars("images", "dog")
def test_search_consistency():
n_pages = 5
search_consistency("images", n_pages)
def test_image_detail(image_fixture):
detail("images", image_fixture)
def test_image_stats():
stats("images")
def test_image_thumb(image_fixture):
thumb(image_fixture)
def test_audio_report(image_fixture):
report("images", image_fixture)
def test_oembed_endpoint_for_json():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
# 'format': 'json' is the default
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
parsed = response.json()
assert parsed["width"] == 1276
assert parsed["height"] == 1536
assert parsed["license_url"] == "https://creativecommons.org/licenses/by-nc-nd/4.0/"
def test_oembed_endpoint_for_xml():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
"format": "xml",
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/xml; charset=utf-8"
response_body_as_xml = ET.fromstring(response.content)
xml_tree = ET.ElementTree(response_body_as_xml)
assert xml_tree.find("width").text == "1276"
assert xml_tree.find("height").text == "1536"
assert (
xml_tree.find("license_url").text
== "https://creativecommons.org/licenses/by-nc-nd/4.0/"
)
| 25.137255 | 88 | 0.693058 |
import json
import xml.etree.ElementTree as ET
from test.constants import API_URL
from test.media_integration import (
detail,
report,
search,
search_consistency,
search_quotes,
search_special_chars,
stats,
thumb,
)
from urllib.parse import urlencode
import pytest
import requests
@pytest.fixture
def image_fixture():
response = requests.get(f"{API_URL}/v1/images?q=dog", verify=False)
assert response.status_code == 200
parsed = json.loads(response.text)
return parsed
def test_search(image_fixture):
search(image_fixture)
def test_search_quotes():
search_quotes("images", "dog")
def test_search_with_special_characters():
search_special_chars("images", "dog")
def test_search_consistency():
n_pages = 5
search_consistency("images", n_pages)
def test_image_detail(image_fixture):
detail("images", image_fixture)
def test_image_stats():
stats("images")
def test_image_thumb(image_fixture):
thumb(image_fixture)
def test_audio_report(image_fixture):
report("images", image_fixture)
def test_oembed_endpoint_for_json():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
parsed = response.json()
assert parsed["width"] == 1276
assert parsed["height"] == 1536
assert parsed["license_url"] == "https://creativecommons.org/licenses/by-nc-nd/4.0/"
def test_oembed_endpoint_for_xml():
params = {
"url": "https://any.domain/any/path/29cb352c-60c1-41d8-bfa1-7d6f7d955f63",
"format": "xml",
}
response = requests.get(
f"{API_URL}/v1/images/oembed?{urlencode(params)}", verify=False
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/xml; charset=utf-8"
response_body_as_xml = ET.fromstring(response.content)
xml_tree = ET.ElementTree(response_body_as_xml)
assert xml_tree.find("width").text == "1276"
assert xml_tree.find("height").text == "1536"
assert (
xml_tree.find("license_url").text
== "https://creativecommons.org/licenses/by-nc-nd/4.0/"
)
| true | true |
f72fa0af1e6f927dec3c1c91f5deebd3c63f8593 | 10,076 | py | Python | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | python/tests/test_metadata.py | brianzhang01/tskit | e4d80810e19034cffa77bb14bc0b8d77537103ad | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018-2019 Tskit Developers
# Copyright (c) 2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests for metadata handling.
"""
import io
import json
import os
import tempfile
import unittest
import pickle
import numpy as np
import python_jsonschema_objects as pjs
import msprime
import tskit
class TestMetadataHdf5RoundTrip(unittest.TestCase):
"""
Tests that we can encode metadata under various formats and this will
successfully round-trip through the HDF5 format.
"""
def setUp(self):
fd, self.temp_file = tempfile.mkstemp(prefix="msp_hdf5meta_test_")
os.close(fd)
def tearDown(self):
os.unlink(self.temp_file)
def test_json(self):
ts = msprime.simulate(10, random_seed=1)
tables = ts.dump_tables()
nodes = tables.nodes
# For each node, we create some Python metadata that can be JSON encoded.
metadata = [
{"one": j, "two": 2 * j, "three": list(range(j))} for j in range(len(nodes))]
encoded, offset = tskit.pack_strings(map(json.dumps, metadata))
nodes.set_columns(
flags=nodes.flags, time=nodes.time, population=nodes.population,
metadata_offset=offset, metadata=encoded)
self.assertTrue(np.array_equal(nodes.metadata_offset, offset))
self.assertTrue(np.array_equal(nodes.metadata, encoded))
ts1 = tables.tree_sequence()
for j, node in enumerate(ts1.nodes()):
decoded_metadata = json.loads(node.metadata.decode())
self.assertEqual(decoded_metadata, metadata[j])
ts1.dump(self.temp_file)
ts2 = tskit.load(self.temp_file)
self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
def test_pickle(self):
ts = msprime.simulate(10, random_seed=1)
tables = ts.dump_tables()
# For each node, we create some Python metadata that can be pickled
metadata = [
{"one": j, "two": 2 * j, "three": list(range(j))}
for j in range(ts.num_nodes)]
encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))
tables.nodes.set_columns(
flags=tables.nodes.flags, time=tables.nodes.time,
population=tables.nodes.population,
metadata_offset=offset, metadata=encoded)
self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))
self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))
ts1 = tables.tree_sequence()
for j, node in enumerate(ts1.nodes()):
decoded_metadata = pickle.loads(node.metadata)
self.assertEqual(decoded_metadata, metadata[j])
ts1.dump(self.temp_file)
ts2 = tskit.load(self.temp_file)
self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
class ExampleMetadata(object):
"""
Simple class that we can pickle/unpickle in metadata.
"""
def __init__(self, one=None, two=None):
self.one = one
self.two = two
class TestMetadataPickleDecoding(unittest.TestCase):
"""
Tests in which use pickle.pickle to decode metadata in nodes, sites and mutations.
"""
def test_nodes(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.nodes.add_row(time=0.125, metadata=pickled)
ts = tables.tree_sequence()
node = ts.node(0)
self.assertEqual(node.time, 0.125)
self.assertEqual(node.metadata, pickled)
unpickled = pickle.loads(node.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
def test_sites(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.sites.add_row(position=0.1, ancestral_state="A", metadata=pickled)
ts = tables.tree_sequence()
site = ts.site(0)
self.assertEqual(site.position, 0.1)
self.assertEqual(site.ancestral_state, "A")
self.assertEqual(site.metadata, pickled)
unpickled = pickle.loads(site.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
def test_mutations(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.nodes.add_row(time=0)
tables.sites.add_row(position=0.1, ancestral_state="A")
tables.mutations.add_row(site=0, node=0, derived_state="T", metadata=pickled)
ts = tables.tree_sequence()
mutation = ts.site(0).mutations[0]
self.assertEqual(mutation.site, 0)
self.assertEqual(mutation.node, 0)
self.assertEqual(mutation.derived_state, "T")
self.assertEqual(mutation.metadata, pickled)
unpickled = pickle.loads(mutation.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
class TestJsonSchemaDecoding(unittest.TestCase):
"""
Tests in which use json-schema to decode the metadata.
"""
schema = """{
"title": "Example Metadata",
"type": "object",
"properties": {
"one": {"type": "string"},
"two": {"type": "string"}
},
"required": ["one", "two"]
}"""
def test_nodes(self):
tables = tskit.TableCollection(sequence_length=1)
builder = pjs.ObjectBuilder(json.loads(self.schema))
ns = builder.build_classes()
metadata = ns.ExampleMetadata(one="node1", two="node2")
encoded = json.dumps(metadata.as_dict()).encode()
tables.nodes.add_row(time=0.125, metadata=encoded)
ts = tables.tree_sequence()
node = ts.node(0)
self.assertEqual(node.time, 0.125)
self.assertEqual(node.metadata, encoded)
decoded = ns.ExampleMetadata.from_json(node.metadata.decode())
self.assertEqual(decoded.one, metadata.one)
self.assertEqual(decoded.two, metadata.two)
class TestLoadTextMetadata(unittest.TestCase):
"""
Tests that use the load_text interface.
"""
def test_individuals(self):
individuals = io.StringIO("""\
id flags location metadata
0 1 0.0,1.0,0.0 abc
1 1 1.0,2.0 XYZ+
2 0 2.0,3.0,0.0 !@#$%^&*()
""")
i = tskit.parse_individuals(
individuals, strict=False, encoding='utf8', base64_metadata=False)
expected = [(1, [0.0, 1.0, 0.0], 'abc'),
(1, [1.0, 2.0], 'XYZ+'),
(0, [2.0, 3.0, 0.0], '!@#$%^&*()')]
for a, b in zip(expected, i):
self.assertEqual(a[0], b.flags)
self.assertEqual(len(a[1]), len(b.location))
for x, y in zip(a[1], b.location):
self.assertEqual(x, y)
self.assertEqual(a[2].encode('utf8'),
b.metadata)
def test_nodes(self):
nodes = io.StringIO("""\
id is_sample time metadata
0 1 0 abc
1 1 0 XYZ+
2 0 1 !@#$%^&*()
""")
n = tskit.parse_nodes(
nodes, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, n):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_sites(self):
sites = io.StringIO("""\
position ancestral_state metadata
0.1 A abc
0.5 C XYZ+
0.8 G !@#$%^&*()
""")
s = tskit.parse_sites(
sites, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, s):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_mutations(self):
mutations = io.StringIO("""\
site node derived_state metadata
0 2 C mno
0 3 G )(*&^%$#@!
""")
m = tskit.parse_mutations(
mutations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, m):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_populations(self):
populations = io.StringIO("""\
id metadata
0 mno
1 )(*&^%$#@!
""")
p = tskit.parse_populations(
populations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, p):
self.assertEqual(a.encode('utf8'),
b.metadata)
| 38.166667 | 89 | 0.609071 |
import io
import json
import os
import tempfile
import unittest
import pickle
import numpy as np
import python_jsonschema_objects as pjs
import msprime
import tskit
class TestMetadataHdf5RoundTrip(unittest.TestCase):
def setUp(self):
fd, self.temp_file = tempfile.mkstemp(prefix="msp_hdf5meta_test_")
os.close(fd)
def tearDown(self):
os.unlink(self.temp_file)
def test_json(self):
ts = msprime.simulate(10, random_seed=1)
tables = ts.dump_tables()
nodes = tables.nodes
metadata = [
{"one": j, "two": 2 * j, "three": list(range(j))} for j in range(len(nodes))]
encoded, offset = tskit.pack_strings(map(json.dumps, metadata))
nodes.set_columns(
flags=nodes.flags, time=nodes.time, population=nodes.population,
metadata_offset=offset, metadata=encoded)
self.assertTrue(np.array_equal(nodes.metadata_offset, offset))
self.assertTrue(np.array_equal(nodes.metadata, encoded))
ts1 = tables.tree_sequence()
for j, node in enumerate(ts1.nodes()):
decoded_metadata = json.loads(node.metadata.decode())
self.assertEqual(decoded_metadata, metadata[j])
ts1.dump(self.temp_file)
ts2 = tskit.load(self.temp_file)
self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
def test_pickle(self):
ts = msprime.simulate(10, random_seed=1)
tables = ts.dump_tables()
metadata = [
{"one": j, "two": 2 * j, "three": list(range(j))}
for j in range(ts.num_nodes)]
encoded, offset = tskit.pack_bytes(list(map(pickle.dumps, metadata)))
tables.nodes.set_columns(
flags=tables.nodes.flags, time=tables.nodes.time,
population=tables.nodes.population,
metadata_offset=offset, metadata=encoded)
self.assertTrue(np.array_equal(tables.nodes.metadata_offset, offset))
self.assertTrue(np.array_equal(tables.nodes.metadata, encoded))
ts1 = tables.tree_sequence()
for j, node in enumerate(ts1.nodes()):
decoded_metadata = pickle.loads(node.metadata)
self.assertEqual(decoded_metadata, metadata[j])
ts1.dump(self.temp_file)
ts2 = tskit.load(self.temp_file)
self.assertEqual(ts1.tables.nodes, ts2.tables.nodes)
class ExampleMetadata(object):
def __init__(self, one=None, two=None):
self.one = one
self.two = two
class TestMetadataPickleDecoding(unittest.TestCase):
def test_nodes(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.nodes.add_row(time=0.125, metadata=pickled)
ts = tables.tree_sequence()
node = ts.node(0)
self.assertEqual(node.time, 0.125)
self.assertEqual(node.metadata, pickled)
unpickled = pickle.loads(node.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
def test_sites(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.sites.add_row(position=0.1, ancestral_state="A", metadata=pickled)
ts = tables.tree_sequence()
site = ts.site(0)
self.assertEqual(site.position, 0.1)
self.assertEqual(site.ancestral_state, "A")
self.assertEqual(site.metadata, pickled)
unpickled = pickle.loads(site.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
def test_mutations(self):
tables = tskit.TableCollection(sequence_length=1)
metadata = ExampleMetadata(one="node1", two="node2")
pickled = pickle.dumps(metadata)
tables.nodes.add_row(time=0)
tables.sites.add_row(position=0.1, ancestral_state="A")
tables.mutations.add_row(site=0, node=0, derived_state="T", metadata=pickled)
ts = tables.tree_sequence()
mutation = ts.site(0).mutations[0]
self.assertEqual(mutation.site, 0)
self.assertEqual(mutation.node, 0)
self.assertEqual(mutation.derived_state, "T")
self.assertEqual(mutation.metadata, pickled)
unpickled = pickle.loads(mutation.metadata)
self.assertEqual(unpickled.one, metadata.one)
self.assertEqual(unpickled.two, metadata.two)
class TestJsonSchemaDecoding(unittest.TestCase):
schema = """{
"title": "Example Metadata",
"type": "object",
"properties": {
"one": {"type": "string"},
"two": {"type": "string"}
},
"required": ["one", "two"]
}"""
def test_nodes(self):
tables = tskit.TableCollection(sequence_length=1)
builder = pjs.ObjectBuilder(json.loads(self.schema))
ns = builder.build_classes()
metadata = ns.ExampleMetadata(one="node1", two="node2")
encoded = json.dumps(metadata.as_dict()).encode()
tables.nodes.add_row(time=0.125, metadata=encoded)
ts = tables.tree_sequence()
node = ts.node(0)
self.assertEqual(node.time, 0.125)
self.assertEqual(node.metadata, encoded)
decoded = ns.ExampleMetadata.from_json(node.metadata.decode())
self.assertEqual(decoded.one, metadata.one)
self.assertEqual(decoded.two, metadata.two)
class TestLoadTextMetadata(unittest.TestCase):
def test_individuals(self):
individuals = io.StringIO("""\
id flags location metadata
0 1 0.0,1.0,0.0 abc
1 1 1.0,2.0 XYZ+
2 0 2.0,3.0,0.0 !@#$%^&*()
""")
i = tskit.parse_individuals(
individuals, strict=False, encoding='utf8', base64_metadata=False)
expected = [(1, [0.0, 1.0, 0.0], 'abc'),
(1, [1.0, 2.0], 'XYZ+'),
(0, [2.0, 3.0, 0.0], '!@#$%^&*()')]
for a, b in zip(expected, i):
self.assertEqual(a[0], b.flags)
self.assertEqual(len(a[1]), len(b.location))
for x, y in zip(a[1], b.location):
self.assertEqual(x, y)
self.assertEqual(a[2].encode('utf8'),
b.metadata)
def test_nodes(self):
nodes = io.StringIO("""\
id is_sample time metadata
0 1 0 abc
1 1 0 XYZ+
2 0 1 !@#$%^&*()
""")
n = tskit.parse_nodes(
nodes, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, n):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_sites(self):
sites = io.StringIO("""\
position ancestral_state metadata
0.1 A abc
0.5 C XYZ+
0.8 G !@#$%^&*()
""")
s = tskit.parse_sites(
sites, strict=False, encoding='utf8', base64_metadata=False)
expected = ['abc', 'XYZ+', '!@#$%^&*()']
for a, b in zip(expected, s):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_mutations(self):
mutations = io.StringIO("""\
site node derived_state metadata
0 2 C mno
0 3 G )(*&^%$#@!
""")
m = tskit.parse_mutations(
mutations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, m):
self.assertEqual(a.encode('utf8'),
b.metadata)
def test_populations(self):
populations = io.StringIO("""\
id metadata
0 mno
1 )(*&^%$#@!
""")
p = tskit.parse_populations(
populations, strict=False, encoding='utf8', base64_metadata=False)
expected = ['mno', ')(*&^%$#@!']
for a, b in zip(expected, p):
self.assertEqual(a.encode('utf8'),
b.metadata)
| true | true |
f72fa0e8e5609366cc6f530508b8be45c3bde4a6 | 410 | py | Python | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | student-work/quinn_zepeda/exercism/python/difference-of-squares/difference_of_squares.py | developerQuinnZ/this_will_work | 5587a9fd030b47f9df6514e45c887b6872d2a4a1 | [
"MIT"
] | null | null | null | def square_of_sum(number):
count = 0
for i in range(1,number + 1):
count += i
c_squared = count**2
return c_squared
def sum_of_squares(number):
total = 0
for i in range(1,number + 1):
total = total + (i**2)
return total
def difference(number):
first = square_of_sum(number)
second = sum_of_squares(number)
total = abs(first - second)
return total
| 20.5 | 35 | 0.617073 | def square_of_sum(number):
count = 0
for i in range(1,number + 1):
count += i
c_squared = count**2
return c_squared
def sum_of_squares(number):
total = 0
for i in range(1,number + 1):
total = total + (i**2)
return total
def difference(number):
first = square_of_sum(number)
second = sum_of_squares(number)
total = abs(first - second)
return total
| true | true |
f72fa13b43a3421e3c2d1f7f4c41c033295dd9e3 | 385 | py | Python | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 143 | 2020-06-10T06:07:26.000Z | 2022-03-02T10:09:16.000Z | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 1 | 2020-06-12T21:52:57.000Z | 2020-06-12T21:52:57.000Z | tino/server.py | NotSoSmartDev/Tino | fd3a941cc1efe07cd9eff209a9e3735a8f7dd537 | [
"MIT"
] | 6 | 2020-06-11T19:21:52.000Z | 2021-12-21T08:33:27.000Z | import uvicorn
class Server(uvicorn.Server):
async def startup(self, sockets=None):
await super().startup(sockets=sockets)
for f in self.config.loaded_app.startup_funcs:
await f()
async def shutdown(self, sockets=None):
await super().shutdown(sockets=sockets)
for f in self.config.loaded_app.shutdown_funcs:
await f()
| 27.5 | 55 | 0.654545 | import uvicorn
class Server(uvicorn.Server):
async def startup(self, sockets=None):
await super().startup(sockets=sockets)
for f in self.config.loaded_app.startup_funcs:
await f()
async def shutdown(self, sockets=None):
await super().shutdown(sockets=sockets)
for f in self.config.loaded_app.shutdown_funcs:
await f()
| true | true |
f72fa17a88f924a7ba488b256d1381abe02cf435 | 381 | py | Python | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | 1 | 2020-09-05T03:07:35.000Z | 2020-09-05T03:07:35.000Z | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | null | null | null | src/vertex.py | lavish205/graph-db | 523cfd50ccb6fdc2644f595e3a76dd71e760bbb4 | [
"MIT"
] | null | null | null | from utils import Compression
class Vertex(object):
def __init__(self, identifier, ctype):
self.id = identifier
self.type = ctype
def compress(self):
return Compression.compress(self)
@staticmethod
def decompress(val):
return Compression.decompress(val)
def __repr__(self):
return '{}-{}'.format(self.id, self.type)
| 21.166667 | 49 | 0.643045 | from utils import Compression
class Vertex(object):
def __init__(self, identifier, ctype):
self.id = identifier
self.type = ctype
def compress(self):
return Compression.compress(self)
@staticmethod
def decompress(val):
return Compression.decompress(val)
def __repr__(self):
return '{}-{}'.format(self.id, self.type)
| true | true |
f72fa21904e69b4faea9e41fe537a9887bcd643e | 2,662 | py | Python | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | 1 | 2021-03-10T04:00:01.000Z | 2021-03-10T04:00:01.000Z | python/array.py | mr-uuid/snippets | 49bb59641d8160d7635b8d5e574cb50f9e5362e2 | [
"MIT"
] | null | null | null | import unittest
class Tester(unittest.TestCase):
def test_zip(self):
"""
zip takes to arrays and makes an array of tuples where tuple 1 is a
tuple composed of element 1 of array 1 and 2, etc...
"""
# combines to arrays into one array of tuples
self.assertEqual(
zip(sorted(set('qwerty')), sorted(set('asdfgh'))),
[('e', 'a'), ('q', 'd'), ('r', 'f'),
('t', 'g'), ('w', 'h'), ('y', 's')]
)
questions = ['name', 'quest', 'favorite color', "WILL GET SKIPPED"]
answers = ['lancelot', 'the holy grail', 'blue']
self.assertEqual(
zip(questions, answers),
[('name', 'lancelot'), ('quest', 'the holy grail'),
('favorite color', 'blue')]
)
a = [1, 2]
b = [(1), (2)]
c = [(1,), (2,)]
d = [(1, 1), (2, 2)]
self.assertEquals(
zip(a, d),
zip(b, d),
[(1, (1, 1)), (2, (2, 2))]
)
self.assertEquals(
zip(a, b),
[(1, 1), (2, 2)],
)
self.assertEquals(
zip(a, c),
zip(b, c),
[(1, (1,)), (2, (2,))],
)
self.assertEquals(
zip(c, d),
[((1,), (1, 1)), ((2,), (2, 2))],
)
def test_any(self):
"""
any([array])
=> takes an array and returns true if any of the elements are true
"""
self.assertEquals(any([True, False]), True)
self.assertEquals(any([None, "apple"]), True)
self.assertEquals(any([False, False]), False)
self.assertEquals(any([None, ""]), False)
def test_enumerate_and_string_sets(self):
"""
* set('string') => returns a set of the charcacters of the string,
it also skips any duplicate characters.
* enumerate(<list>) => returns a list of the following nature:
[(1, <first_element_of_list>), ..., (N, <Nth_element_of_list>)]
* <dict>.items() => returns a list of the following nature:
[(key, value), ...]
"""
# generates an itterator that returns [(index, value), ....]
char_list = [(index, v) for index, v in enumerate(sorted(set('abca')))]
self.assertEquals(
{0: "a", 1: 'b', 2: 'c'}.items(),
char_list
)
def test_reverse_enumerate_and_string_sets(self):
self.assertEquals(
[x for x in reversed(sorted(set(('aceg'*4) + ('bdfh'*3))))],
list(reversed(sorted(set('abcdefgh'))))
)
if __name__ == "__main__":
unittest.main()
| 30.25 | 79 | 0.472953 | import unittest
class Tester(unittest.TestCase):
def test_zip(self):
self.assertEqual(
zip(sorted(set('qwerty')), sorted(set('asdfgh'))),
[('e', 'a'), ('q', 'd'), ('r', 'f'),
('t', 'g'), ('w', 'h'), ('y', 's')]
)
questions = ['name', 'quest', 'favorite color', "WILL GET SKIPPED"]
answers = ['lancelot', 'the holy grail', 'blue']
self.assertEqual(
zip(questions, answers),
[('name', 'lancelot'), ('quest', 'the holy grail'),
('favorite color', 'blue')]
)
a = [1, 2]
b = [(1), (2)]
c = [(1,), (2,)]
d = [(1, 1), (2, 2)]
self.assertEquals(
zip(a, d),
zip(b, d),
[(1, (1, 1)), (2, (2, 2))]
)
self.assertEquals(
zip(a, b),
[(1, 1), (2, 2)],
)
self.assertEquals(
zip(a, c),
zip(b, c),
[(1, (1,)), (2, (2,))],
)
self.assertEquals(
zip(c, d),
[((1,), (1, 1)), ((2,), (2, 2))],
)
def test_any(self):
self.assertEquals(any([True, False]), True)
self.assertEquals(any([None, "apple"]), True)
self.assertEquals(any([False, False]), False)
self.assertEquals(any([None, ""]), False)
def test_enumerate_and_string_sets(self):
char_list = [(index, v) for index, v in enumerate(sorted(set('abca')))]
self.assertEquals(
{0: "a", 1: 'b', 2: 'c'}.items(),
char_list
)
def test_reverse_enumerate_and_string_sets(self):
self.assertEquals(
[x for x in reversed(sorted(set(('aceg'*4) + ('bdfh'*3))))],
list(reversed(sorted(set('abcdefgh'))))
)
if __name__ == "__main__":
unittest.main()
| true | true |
f72fa31aa61c2010032ba331da9c46f0c28c8f64 | 586 | py | Python | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 13 | 2021-07-24T20:49:35.000Z | 2021-08-21T18:15:16.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 2 | 2021-08-17T17:11:09.000Z | 2021-09-01T19:05:17.000Z | recipys/types.py | gmso/recipys | 8ca39a7b0ace2c678fe7a5c8271c6843db6c2c35 | [
"MIT"
] | 1 | 2021-07-29T16:36:35.000Z | 2021-07-29T16:36:35.000Z | from dataclasses import dataclass
from typing import Optional, List
@dataclass
class RecipeConstraints:
meal: Optional[str] = None
ingredients: Optional[List[str]] = None
@dataclass
class Printable:
title: str = ""
ingredients: str = ""
preparation: str = ""
error_message: Optional[str] = None
warning_message: Optional[str] = None
info_message: Optional[str] = None
@dataclass
class FetchingError(Exception):
message: str = "An error ocurred"
@dataclass
class PrintInterrupt(Exception):
printable: Printable
| 20.206897 | 44 | 0.677474 | from dataclasses import dataclass
from typing import Optional, List
@dataclass
class RecipeConstraints:
meal: Optional[str] = None
ingredients: Optional[List[str]] = None
@dataclass
class Printable:
title: str = ""
ingredients: str = ""
preparation: str = ""
error_message: Optional[str] = None
warning_message: Optional[str] = None
info_message: Optional[str] = None
@dataclass
class FetchingError(Exception):
message: str = "An error ocurred"
@dataclass
class PrintInterrupt(Exception):
printable: Printable
| true | true |
f72fa471e47a678d1cdcba520ccfb797969a10c8 | 43 | py | Python | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-09-13T17:51:55.000Z | 2020-11-25T18:47:12.000Z | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | 2 | 2020-11-25T19:18:15.000Z | 2021-06-01T21:48:12.000Z | src/brython_jinja2/__init__.py | jonathanverner/brython-jinja2 | cec6e16de1750203a858d0acf590f230fc3bf848 | [
"BSD-3-Clause"
] | null | null | null | from . import platform
from . import utils
| 14.333333 | 22 | 0.767442 | from . import platform
from . import utils
| true | true |
f72fa4cf65c6c8ffeabe5891fa79f70b7f2fe473 | 8,228 | py | Python | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 10 | 2017-10-17T04:35:44.000Z | 2021-03-19T21:12:15.000Z | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 224 | 2017-10-18T18:33:48.000Z | 2022-02-02T03:33:04.000Z | app/worker/tasks/importers/historical_data_importer.py | CodeTheChangeUBC/reBOOT | df6a7d9990fc261c28bf65b83b561a765dc78723 | [
"MIT"
] | 1 | 2018-08-02T03:10:25.000Z | 2018-08-02T03:10:25.000Z | import re
from dateutil.parser import parse
from django.utils import timezone as tz
from .base_csv_importer import BaseCsvImporter
from app.constants.item_map import ITEM_MAP
from app.enums import ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
class HistoricalDataImporter(BaseCsvImporter):
    """Importer for the legacy "10b" CSV format.

    Takes a 10b format file path and imports its rows into the database
    using the 10x format, writing to the Donor, Donation, ItemDeviceType,
    ItemDevice and Item tables.

    :param str csvfile: csvfile path
    """
    # Item rows are accumulated on self.model_bulk and bulk-inserted by the
    # base importer.
    bulk_model = Item

    def parse_row(self, row):
        """Parse one CSV row and queue the resulting Item for bulk insert.

        Donor, Donation and device records are get-or-created immediately;
        only the Item itself is deferred to the bulk insert.

        :param dict row: A CSV row dict
        """
        donor = self._goc_donor(self._parse_donor(row))
        donation = self._goc_donation(self._parse_donation(row), donor)
        device_type = self._goc_device_type(self._parse_device_type(row))
        device = self._goc_item_device(
            self._parse_item_device(row), device_type)
        self.model_bulk.append(
            self._new_item(self._parse_item(row), donation, device))

    def _parse_donor(self, row):
        """Takes a row and parses relevant Donor data into a dict.

        :param dict row: A CSV row dict
        :return: Donor related data dict
        :rtype: dict
        """
        # Normalize the free-text receipt preference; anything unrecognized
        # defaults to EMAIL.
        receipt_option_f = {
            "notneeded": "REFUSED",
            "email": "EMAIL",
            "mail": "MAIL"
        }.get(re.sub("[^a-zA-Z]+", "", row["TRV"]).lower(), "EMAIL")
        documented_at_f = self._parse_date(row["Date"])
        # Strip punctuation from the postal code and cap at 7 characters
        # ("A1A 1A1" format).
        postal_f = re.sub("[^a-zA-Z0-9 ]+", "", row["Postal Code"]).upper()[:7]
        return {
            "donor_name": row["Donor Name"],
            "contact_name": row.get("Contact", row["Donor Name"]),
            "email": row["Email"],
            "want_receipt": receipt_option_f,
            "telephone_number": row["Telephone"],
            "mobile_number": row["Mobile"],
            "address_line_one": row["Address"],
            "address_line_two": row.get("Unit", ""),
            "city": row["City"],
            "province": row["Prov."],
            "postal_code": postal_f,
            "customer_ref": row["CustRef"],
            "documented_at": documented_at_f
        }

    def _parse_donation(self, row):
        """Takes a csv row and parses relevant Donation data into a dict.

        Historical rows carry a single date, so every date field of the
        Donation is populated from it.

        :param dict row: A CSV row dict
        :return: Donation related data dict
        :rtype: dict
        """
        donate_date_f = documented_at_f = self._parse_date(row["Date"])
        return {
            "tax_receipt_no": row["TR#"],
            "pledge_date": donate_date_f,
            "donate_date": donate_date_f,
            "test_date": donate_date_f,
            "valuation_date": donate_date_f,
            "pick_up": row["PPC"],
            "source": "HISTORICAL_DATA",  # Fixed
            "documented_at": documented_at_f,
            "tax_receipt_created_at": tz.now()
        }

    def _parse_device_type(self, row):
        """Takes a csv row and parses relevant ItemDeviceType data into a dict.

        Descriptions not present in ITEM_MAP fall back to an uncategorized
        entry that preserves the raw description.

        :param dict row: A CSV row dict
        :return: ItemDeviceType related data dict
        :rtype: dict
        """
        dtype = ITEM_MAP.get(row["Item Description"].lower(), None)
        if dtype is None:
            return {
                "category": "not categorized",
                "device_type": row["Item Description"],
            }
        return dtype

    def _parse_item_device(self, row):
        """Takes a csv row and parses relevant ItemDevice data into a dict.

        Only make/model are available in the historical data; the remaining
        hardware fields are blank/None.

        :param dict row: A CSV row dict
        :return: ItemDevice related data dict
        :rtype: dict
        """
        return {
            "make": row["Manufacturer"],
            "model": row["Model"],
            "cpu_type": "",
            "speed": "",
            "memory": None,
            "hd_size": None,
            "screen_size": "",
            "hdd_serial_number": "",
            "operating_system": ""
        }

    def _parse_item(self, row):
        """Takes a csv row and parses relevant Item data into a dict.

        :param dict row: A CSV row dict
        :return: Item related data dict
        :rtype: dict
        """
        working_f = row["Working"].lower() == "y"
        donate_date_f = documented_at_f = self._parse_date(row["Date"])
        batch_f = "" if row["Batch"] == "0" else row["Batch"]
        particulars_f = row["Item Particulars"]
        if particulars_f == "0":
            particulars_f = ""
        qty_f = int(row.get("Qty", 0))
        try:
            # BUGFIX: the old pattern "[^0-9|.]" also kept literal '|'
            # characters ('|' is not alternation inside a character class),
            # which made float() fail on otherwise salvageable values.
            value_f = float(re.sub("[^0-9.]", "", row["Value"]))
        except ValueError:
            value_f = 0.0
        # BUGFIX: guard against ZeroDivisionError when Qty is missing or "0".
        value_per_f = round(value_f / qty_f, 2) if qty_f else 0.0
        return {
            "serial_number": "",
            "asset_tag": "",
            "particulars": particulars_f,
            "quantity": row["Qty"],
            "working": working_f,
            "condition": row["Condition"],
            "quality": row["Quality"],
            "batch": batch_f,
            "value": str(value_per_f),
            "verified": True,
            "documented_at": documented_at_f,
            "status": ItemStatusEnum.RECEIVED.name,
            "notes": particulars_f,
            "valuation_date": donate_date_f,
            # "weight":
            # "valuation_supporting_doc":
        }

    def _goc_donor(self, data):
        """get_or_create a Donor.

        Uses filter().first() rather than get() so pre-existing duplicate
        donors do not raise MultipleObjectsReturned.

        :param dict data: A Donor dict
        :return: Donor object
        :rtype: app.models.Donor instance
        """
        try:
            donor = Donor.objects.filter(
                donor_name=data['donor_name'],
                contact_name=data['contact_name'],
                email=data['email'],
                want_receipt=data['want_receipt'],
                telephone_number=data['telephone_number'],
                mobile_number=data['mobile_number'],
                address_line_one=data['address_line_one'],
                address_line_two=data['address_line_two'],
                city=data['city'],
                province=data['province'],
                postal_code=data['postal_code'],
            ).first()
            if donor is None:
                raise Donor.DoesNotExist
        except Exception:
            donor = Donor.objects.create(**data)
        return donor

    def _goc_donation(self, data, donor):
        """get_or_create a Donation.

        :param dict data: A Donation dict
        :param obj donor: app.model.Donor object
        :return: Donation object
        :rtype: app.models.Donation instance
        """
        try:
            # Match by tax receipt number rather than full donation data
            d = Donation.objects.get(tax_receipt_no=data.get("tax_receipt_no"))
        except Exception:
            d = Donation.objects.create(donor=donor, **data)
        return d

    def _goc_device_type(self, data):
        """get_or_create a ItemDeviceType.

        :param dict data: A ItemDeviceType dict
        :return: ItemDeviceType object
        :rtype: app.models.ItemDeviceType instance
        """
        dtype, _ = ItemDeviceType.objects.get_or_create(**data)
        return dtype

    def _goc_item_device(self, data, dtype):
        """get_or_create a ItemDevice.

        :param dict data: A ItemDevice dict
        :param obj dtype: app.model.ItemDeviceType object
        :return: ItemDevice object
        :rtype: app.models.ItemDevice instance
        """
        i, _ = ItemDevice.objects.get_or_create(dtype=dtype, **data)
        return i

    def _new_item(self, data, donation, device):
        """Initialize (but do not save) a new validated Item object.

        :param dict data: A Item dict
        :param obj donation: app.model.Donation object
        :param obj device: app.model.ItemDevice object
        :return: Item object
        :rtype: app.models.Item instance
        """
        try:
            i = Item(donation=donation, device=device, **data)
            i.clean_fields()
        except Exception as e:
            # BUGFIX: if Item(...) itself raised, ``i`` was unbound and the
            # old ``i.underscore_serialize()`` masked the real error with an
            # UnboundLocalError; log the raw input data instead.
            self.logger.error(f"Item Data: {data}")
            raise e
        return i

    @staticmethod
    def _parse_date(date_f):
        """Takes dynamic date formats and unifies them into Y-m-d format.

        Day-first parsing is assumed (e.g. '02/03/2017' is March 2nd).
        """
        date = parse(date_f, dayfirst=True)
        return date.strftime('%Y-%m-%d')
| 34.426778 | 79 | 0.565386 | import re
from dateutil.parser import parse
from django.utils import timezone as tz
from .base_csv_importer import BaseCsvImporter
from app.constants.item_map import ITEM_MAP
from app.enums import ItemStatusEnum
from app.models import Donor, Donation, Item, ItemDevice, ItemDeviceType
class HistoricalDataImporter(BaseCsvImporter):
bulk_model = Item
def parse_row(self, row):
donor = self._goc_donor(self._parse_donor(row))
donation = self._goc_donation(self._parse_donation(row), donor)
device_type = self._goc_device_type(self._parse_device_type(row))
device = self._goc_item_device(
self._parse_item_device(row), device_type)
self.model_bulk.append(
self._new_item(self._parse_item(row), donation, device))
def _parse_donor(self, row):
receipt_option_f = {
"notneeded": "REFUSED",
"email": "EMAIL",
"mail": "MAIL"
}.get(re.sub("[^a-zA-Z]+", "", row["TRV"]).lower(), "EMAIL")
documented_at_f = self._parse_date(row["Date"])
postal_f = re.sub("[^a-zA-Z0-9 ]+", "", row["Postal Code"]).upper()[:7]
return {
"donor_name": row["Donor Name"],
"contact_name": row.get("Contact", row["Donor Name"]),
"email": row["Email"],
"want_receipt": receipt_option_f,
"telephone_number": row["Telephone"],
"mobile_number": row["Mobile"],
"address_line_one": row["Address"],
"address_line_two": row.get("Unit", ""),
"city": row["City"],
"province": row["Prov."],
"postal_code": postal_f,
"customer_ref": row["CustRef"],
"documented_at": documented_at_f
}
def _parse_donation(self, row):
donate_date_f = documented_at_f = self._parse_date(row["Date"])
return {
"tax_receipt_no": row["TR#"],
"pledge_date": donate_date_f,
"donate_date": donate_date_f,
"test_date": donate_date_f,
"valuation_date": donate_date_f,
"pick_up": row["PPC"],
"source": "HISTORICAL_DATA",
"documented_at": documented_at_f,
"tax_receipt_created_at": tz.now()
}
def _parse_device_type(self, row):
dtype = ITEM_MAP.get(row["Item Description"].lower(), None)
if dtype is None:
return {
"category": "not categorized",
"device_type": row["Item Description"],
}
return dtype
def _parse_item_device(self, row):
return {
"make": row["Manufacturer"],
"model": row["Model"],
"cpu_type": "",
"speed": "",
"memory": None,
"hd_size": None,
"screen_size": "",
"hdd_serial_number": "",
"operating_system": ""
}
def _parse_item(self, row):
working_f = row["Working"].lower() == "y"
donate_date_f = documented_at_f = self._parse_date(row["Date"])
batch_f = "" if row["Batch"] == "0" else row["Batch"]
particulars_f = row["Item Particulars"]
if particulars_f == "0":
particulars_f = ""
qty_f = int(row.get("Qty", 0))
try:
value_f = float(re.sub("[^0-9|.]", "", row["Value"]))
except ValueError:
value_f = 0.0
value_per_f = round(value_f / qty_f, 2)
return {
"serial_number": "",
"asset_tag": "",
"particulars": particulars_f,
"quantity": row["Qty"],
"working": working_f,
"condition": row["Condition"],
"quality": row["Quality"],
"batch": batch_f,
"value": str(value_per_f),
"verified": True,
"documented_at": documented_at_f,
"status": ItemStatusEnum.RECEIVED.name,
"notes": particulars_f,
"valuation_date": donate_date_f,
}
def _goc_donor(self, data):
try:
donor = Donor.objects.filter(
donor_name=data['donor_name'],
contact_name=data['contact_name'],
email=data['email'],
want_receipt=data['want_receipt'],
telephone_number=data['telephone_number'],
mobile_number=data['mobile_number'],
address_line_one=data['address_line_one'],
address_line_two=data['address_line_two'],
city=data['city'],
province=data['province'],
postal_code=data['postal_code'],
).first()
if donor is None:
raise Donor.DoesNotExist
except Exception:
donor = Donor.objects.create(**data)
return donor
def _goc_donation(self, data, donor):
try:
d = Donation.objects.get(tax_receipt_no=data.get("tax_receipt_no"))
except Exception:
d = Donation.objects.create(donor=donor, **data)
return d
def _goc_device_type(self, data):
dtype, unique = ItemDeviceType.objects.get_or_create(**data)
return dtype
def _goc_item_device(self, data, dtype):
i, unique = ItemDevice.objects.get_or_create(dtype=dtype, **data)
return i
def _new_item(self, data, donation, device):
try:
i = Item(donation=donation, device=device, **data)
i.clean_fields()
except Exception as e:
self.logger.error(f"Item Data: {i.underscore_serialize()}")
raise e
return i
@staticmethod
def _parse_date(date_f):
date = parse(date_f, dayfirst=True)
return date.strftime('%Y-%m-%d')
| true | true |
f72fa56028b7563a6c7ecc30355b5545eb740df6 | 16,939 | py | Python | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | arxiv_public_data/authors.py | The-Collaboratory/arxiv-public-datasets | a3578b2a693c964ed8310d8ddc3a3801f33ce3c9 | [
"MIT"
] | null | null | null | # https://github.com/arXiv/arxiv-base@32e6ad0
"""
Copyright 2017 Cornell University
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""Parse Authors lines to extract author and affiliation data."""
import re
import os
import gzip
import json
from itertools import dropwhile
from typing import Dict, Iterator, List, Tuple
from multiprocessing import Pool, cpu_count
# from arxiv_public_data.tex2utf import tex2utf
# from arxiv_public_data.config import LOGGER, DIR_OUTPUT
# logger = LOGGER.getChild('authorsplit')
PREFIX_MATCH = 'van|der|de|la|von|del|della|da|mac|ter|dem|di|vaziri'
"""
Takes data from an Author: line in the current arXiv abstract
file and returns a structured set of data:
author_list_ptr = [
[ author1_keyname, author1_firstnames, author1_suffix, affil1, affil2 ] ,
[ author2_keyname, author2_firstnames, author1_suffix, affil1 ] ,
[ author3_keyname, author3_firstnames, author1_suffix ]
]
Abstracted from Dienst software for OAI1 and other uses. This
routine should just go away when a better metadata structure is
adopted that deals with names and affiliations properly.
Must remember that there is at least one person on the archive
who has only one name; this should clearly be considered the key name.
Code originally written by Christina Scovel, Simeon Warner Dec99/Jan00
2000-10-16 - separated.
2000-12-07 - added support for suffix
2003-02-14 - get surname prefixes from arXiv::Filters::Index [Simeon]
2007-10-01 - created test script, some tidying [Simeon]
2018-05-25 - Translated from Perl to Python [Brian C.]
"""
def parse_author_affil(authors: str) -> List[List[str]]:
    """
    Parse an authors line into structured author/affiliation records.

    Each record holds at least three strings -- keyname, firstname(s) and
    suffix.  The keyname always has content; the other two may be empty.
    Any elements after the first three are affiliations (zero or more).

    This is a thin wrapper: the line is first split into per-author records,
    then affiliations are back-propagated to earlier authors that lack them.
    Use the split step alone for display purposes where back-propagation of
    affiliation information is not wanted.

    Handling of the "XX collaboration" prefix is duplicated here and in
    arXiv::HTML::AuthorLink -- it shouldn't be; likely it should live here.

    :param authors: string of authors from abs file or similar
    :return: list of [keyname, firstnames, suffix, affil, ...] lists
    """
    split = _parse_author_affil_split(authors)
    return _parse_author_affil_back_propagate(
        split['author_list'], split['back_prop'])
def _parse_author_affil_split(author_line: str) -> Dict:
    """
    Split author line into author and affiliation data.

    Take author line, tidy spacing and punctuation, and then split up into
    individual author and affiliation data. Has special cases to avoid
    splitting an initial collaboration name, and records in ``back_prop``
    the fact that affiliations should not be back propagated to collaboration
    names.

    Does not handle multiple collaboration names.

    :param author_line: raw authors string
    :return: dict with 'author_list' (list of [keyname, firstnames, suffix,
        affil...] entries) and 'back_prop' (number of leading collaboration
        entries to shield from affiliation back-propagation)
    """
    if not author_line:
        return {'author_list': [], 'back_prop': 0}
    names: List[str] = split_authors(author_line)
    if not names:
        return {'author_list': [], 'back_prop': 0}
    names = _remove_double_commas(names)
    # get rid of commas at back
    namesIter: Iterator[str] = reversed(
        list(dropwhile(lambda x: x == ',', reversed(names))))
    # get rid of commas at front
    names = list(dropwhile(lambda x: x == ',', namesIter))
    # Extract all names (all parts not starting with comma or paren).
    # NB: the character class '[^](,]' reads as "first char is not ']',
    # '(' or ','" -- the first ']' after '[^' is a literal class member.
    names = list(map(_tidy_name, filter(
        lambda x: re.match('^[^](,]', x), names)))
    # Drop trailing "et al." entries -- they are not author names.
    names = list(filter(lambda n: not re.match(
        r'^\s*et\.?\s+al\.?\s*', n, flags=re.IGNORECASE), names))
    (names, author_list,
     back_propagate_affiliations_to) = _collaboration_at_start(names)
    (enumaffils) = _enum_collaboration_at_end(author_line)
    # Split name into keyname and firstnames/initials.
    # Deal with different patterns in turn: prefixes, suffixes, plain
    # and single name.  Patterns are tried in order; first match wins.
    patterns = [('double-prefix',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(' +
                 PREFIX_MATCH + r')\s(\S+)$'),
                ('name-prefix-name',
                 r'^(.*)\s+(' + PREFIX_MATCH + r')\s(\S+)$'),
                ('name-name-prefix',
                 r'^(.*)\s+(\S+)\s(I|II|III|IV|V|Sr|Jr|Sr\.|Jr\.)$'),
                ('name-name',
                 r'^(.*)\s+(\S+)$'), ]
    # Now go through names in turn and try to get affiliations
    # to go with them
    for name in names:
        pattern_matches = ((mtype, re.match(m, name, flags=re.IGNORECASE))
                           for (mtype, m) in patterns)
        (mtype, match) = next(((mtype, m)
                               for (mtype, m) in pattern_matches
                               if m is not None), ('default', None))
        if match is None:
            # single name (no spaces) -- whole string is the keyname
            author_entry = [name, '', '']
        elif mtype == 'double-prefix':
            # e.g. 'X van der Y' -> keyname 'van der Y'
            s = '{} {} {}'.format(match.group(
                2), match.group(3), match.group(4))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-prefix-name':
            # e.g. 'X de Y' -> keyname 'de Y'
            s = '{} {}'.format(match.group(2), match.group(3))
            author_entry = [s, match.group(1), '']
        elif mtype == 'name-name-prefix':
            # trailing generational suffix (Jr/Sr/roman numeral)
            author_entry = [match.group(2), match.group(1), match.group(3)]
        elif mtype == 'name-name':
            author_entry = [match.group(2), match.group(1), '']
        else:
            author_entry = [name, '', '']
        # search back in author_line for affiliation
        author_entry = _add_affiliation(
            author_line, enumaffils, author_entry, name)
        author_list.append(author_entry)
    return {'author_list': author_list,
            'back_prop': back_propagate_affiliations_to}
def parse_author_affil_utf(authors: str) -> List:
    """
    Call parse_author_affil() and do TeX to UTF conversion.

    Output structure is the same as :func:`parse_author_affil` but each
    string is passed through ``tex2utf``.

    :param authors: raw author line (may contain TeX accent sequences)
    :return: list of [keyname, firstnames, suffix, affil, ...] lists
    """
    if not authors:
        return []
    try:
        convert = tex2utf
    except NameError:
        # BUGFIX: the ``tex2utf`` import is commented out at the top of this
        # file, so the original code raised NameError for any non-empty
        # input.  Fall back to returning the strings unconverted.
        convert = lambda s: s
    return list(map(lambda author: list(map(convert, author)),
                    parse_author_affil(authors)))
def _remove_double_commas(items: List[str]) -> List[str]:
parts: List[str] = []
last = ''
for pt in items:
if pt == ',' and last == ',':
continue
else:
parts.append(pt)
last = pt
return parts
def _tidy_name(name: str) -> str:
name = re.sub(r'\s\s+', ' ', name) # also gets rid of CR
# add space after dot (except in TeX)
name = re.sub(r'(?<!\\)\.(\S)', r'. \g<1>', name)
return name
def _collaboration_at_start(names: List[str]) \
-> Tuple[List[str], List[List[str]], int]:
"""Perform special handling of collaboration at start."""
author_list = []
back_propagate_affiliations_to = 0
while len(names) > 0:
m = re.search(r'([a-z0-9\s]+\s+(collaboration|group|team))',
names[0], flags=re.IGNORECASE)
if not m:
break
# Add to author list
author_list.append([m.group(1), '', ''])
back_propagate_affiliations_to += 1
# Remove from names
names.pop(0)
# Also swallow and following comma or colon
if names and (names[0] == ',' or names[0] == ':'):
names.pop(0)
return names, author_list, back_propagate_affiliations_to
def _enum_collaboration_at_end(author_line: str)->Dict:
"""Get separate set of enumerated affiliations from end of author_line."""
# Now see if we have a separate set of enumerated affiliations
# This is indicated by finding '(\s*('
line_m = re.search(r'\(\s*\((.*)$', author_line)
if not line_m:
return {}
enumaffils = {}
affils = re.sub(r'\s*\)\s*$', '', line_m.group(1))
# Now expect to have '1) affil1 (2) affil2 (3) affil3'
for affil in affils.split('('):
# Now expect `1) affil1 ', discard if no match
m = re.match(r'^(\d+)\)\s*(\S.*\S)\s*$', affil)
if m:
enumaffils[m.group(1)] = re.sub(r'[\.,\s]*$', '', m.group(2))
return enumaffils
def _add_affiliation(author_line: str,
enumaffils: Dict,
author_entry: List[str],
name: str) -> List:
"""
Add author affiliation to author_entry if one is found in author_line.
This should deal with these cases
Smith B(labX) Smith B(1) Smith B(1, 2) Smith B(1 & 2) Smith B(1 and 2)
"""
en = re.escape(name)
namerex = r'{}\s*\(([^\(\)]+)'.format(en.replace(' ', 's*'))
m = re.search(namerex, author_line, flags=re.IGNORECASE)
if not m:
return author_entry
# Now see if we have enumerated references (just commas, digits, &, and)
affils = m.group(1).rstrip().lstrip()
affils = re.sub(r'(&|and)/,', ',', affils, flags=re.IGNORECASE)
if re.match(r'^[\d,\s]+$', affils):
for affil in affils.split(','):
if affil in enumaffils:
author_entry.append(enumaffils[affil])
else:
author_entry.append(affils)
return author_entry
def _parse_author_affil_back_propagate(author_list: List[List[str]],
back_prop: int) -> List[List[str]]:
"""Back propagate author affiliation.
Take the author list structure generated by parse_author_affil_split(..)
and propagate affiliation information backwards to preceeding author
entries where none was give. Stop before entry $back_prop to avoid
adding affiliation information to collaboration names.
given, eg:
a.b.first, c.d.second (affil)
implies
a.b.first (affil), c.d.second (affil)
and in more complex cases:
a.b.first, c.d.second (1), e.f.third, g.h.forth (2,3)
implies
a.b.first (1), c.d.second (1), e.f.third (2,3), g.h.forth (2,3)
"""
last_affil: List[str] = []
for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1):
author_entry = author_list[x]
if len(author_entry) > 3: # author has affiliation,store
last_affil = author_entry
elif last_affil:
# author doesn't have affil but later one did => copy
author_entry.extend(last_affil[3:])
return author_list
def split_authors(authors: str) -> List:
    """
    Split author string into authors entity lists.

    Take an author line as a string and return a list of the different name
    and affiliation blocks: names, separator tokens (',' / ':') and
    parenthesised comments, in order.  While this normalizes spacing and
    'and', it is a key feature that the set of strings returned can be
    concatenated to reproduce the original authors line.  This provides a
    very graceful degradation for badly formatted author lines, as the text
    at least shows up.

    :param authors: raw author line
    :return: list of name strings, ',' / ':' separators and '(...)' comments
    """
    # split authors field into blocks with boundaries of ( and )
    if not authors:
        return []
    aus = re.split(r'(\(|\))', authors)
    aus = list(filter(lambda x: x != '', aus))
    blocks = []
    if len(aus) == 1:
        blocks.append(authors)
    else:
        c = ''
        depth = 0
        for bit in aus:
            if bit == '':
                continue
            if bit == '(':  # track open parentheses
                depth += 1
                if depth == 1:
                    blocks.append(c)
                    c = '('
                else:
                    c = c + bit
            elif bit == ')':  # track close parentheses
                depth -= 1
                c = c + bit
                if depth == 0:
                    blocks.append(c)
                    c = ''
                else:  # haven't closed, so keep accumulating
                    continue
            else:
                c = c + bit
        if c:
            blocks.append(c)
    listx = []
    for block in blocks:
        block = re.sub(r'\s+', ' ', block)
        if re.match(r'^\(', block):  # it is a comment
            listx.append(block)
        else:  # it is a name
            block = re.sub(r',?\s+(and|\&)\s', ',', block)
            names = re.split(r'(,|:)\s*', block)
            for name in names:
                if not name:
                    continue
                name = name.rstrip().lstrip()
                if name:
                    listx.append(name)
    # Recombine suffixes that were separated with a comma
    parts: List[str] = []
    for p in listx:
        # BUGFIX: the old pattern r'^(Jr\.?|Sr\.?\[IV]{2,})$' had a '\['
        # where an alternation bar belonged, so bare 'Sr' and roman-numeral
        # suffixes ('II', 'III', ...) were never recombined.
        if re.match(r'^(Jr\.?|Sr\.?|[IV]{2,})$', p) \
                and len(parts) >= 2 \
                and parts[-1] == ',' \
                and not re.match(r'\)$', parts[-2]):
            separator = parts.pop()
            last = parts.pop()
            recomb = "{}{} {}".format(last, separator, p)
            parts.append(recomb)
        else:
            parts.append(p)
    return parts
def parse_authorline(authors: str) -> str:
    """
    The external facing function from this module. Converts a complex
    authorline into a simple one with only UTF-8.

    Parameters
    ----------
    authors : string
        The raw author line from the metadata

    Returns
    -------
    clean_authors : string
        String representing the cleaned author line

    Examples
    --------
    >>> parse_authorline('A. Losev, S. Shadrin, I. Shneiberg')
    'Losev, A.; Shadrin, S.; Shneiberg, I.'
    >>> parse_authorline('Stephen C. Power (Lancaster University), Baruch Solel (Technion)')
    'Power, Stephen C.; Solel, Baruch'
    """
    parsed = parse_author_affil_utf(authors)
    formatted = []
    for entry in parsed:
        # keyname + firstnames only; drop empty parts and affiliations
        name_parts = [part for part in entry[:2] if part]
        formatted.append(', '.join(name_parts))
    return '; '.join(formatted)
def _parse_article_authors(article_author):
    """
    Worker for :func:`parse_authorline_parallel`.

    :param article_author: (arxiv_id, raw author string) pair
    :return: [arxiv_id, parsed author structure] on success, or
        [arxiv_id, ''] if parsing raised
    """
    try:
        return [article_author[0], parse_author_affil_utf(article_author[1])]
    except Exception as e:
        # NOTE(review): ``logger`` is provided by arxiv_public_data.config,
        # whose import is commented out at the top of this file -- as
        # written, this error path would raise NameError; confirm the
        # intended logger wiring.
        msg = "Author split failed for article {}".format(article_author[0])
        logger.error(msg)
        logger.exception(e)
        return [article_author[0], '']
def parse_authorline_parallel(article_authors, n_processes=None):
    """
    Parallelize `parse_authorline`

    Parameters
    ----------
    article_authors : list
        list of tuples (arXiv id, author strings from metadata)
    (optional)
    n_processes : int
        number of worker processes (None uses the multiprocessing default,
        i.e. ``os.cpu_count()``)

    Returns
    -------
    None -- results are written to ``DIR_OUTPUT/authors-parsed.json.gz`` as
    a JSON mapping of arXiv id to the standardized author structure:
        [
            [ author1_keyname, author1_firstnames, author1_suffix, affil1,
              affil2 ] ,
            [ author2_keyname, author2_firstnames, author1_suffix, affil1 ] ,
            [ author3_keyname, author3_firstnames, author1_suffix ]
        ]
    """
    # NOTE(review): ``logger`` and ``DIR_OUTPUT`` come from
    # arxiv_public_data.config, whose import is commented out at the top of
    # this file -- confirm the intended wiring.
    logger.info(
        'Parsing author lines for {} articles...'.format(len(article_authors))
    )
    # BUGFIX: use the pool as a context manager so worker processes are
    # terminated and joined even if a worker raises (the old code leaked
    # them).
    with Pool(n_processes) as pool:
        parsed = pool.map(_parse_article_authors, article_authors)
    outdict = {aid: auth for aid, auth in parsed}
    filename = os.path.join(DIR_OUTPUT, 'authors-parsed.json.gz')
    logger.info('Saving to {}'.format(filename))
    with gzip.open(filename, 'wb') as fout:
        fout.write(json.dumps(outdict).encode('utf-8'))
| 36.040426 | 179 | 0.611724 |
import re
import os
import gzip
import json
from itertools import dropwhile
from typing import Dict, Iterator, List, Tuple
from multiprocessing import Pool, cpu_count
PREFIX_MATCH = 'van|der|de|la|von|del|della|da|mac|ter|dem|di|vaziri'
def parse_author_affil(authors: str) -> List[List[str]]:
return _parse_author_affil_back_propagate(
**_parse_author_affil_split(authors))
def _parse_author_affil_split(author_line: str) -> Dict:
if not author_line:
return {'author_list': [], 'back_prop': 0}
names: List[str] = split_authors(author_line)
if not names:
return {'author_list': [], 'back_prop': 0}
names = _remove_double_commas(names)
namesIter: Iterator[str] = reversed(
list(dropwhile(lambda x: x == ',', reversed(names))))
names = list(dropwhile(lambda x: x == ',', namesIter))
names = list(map(_tidy_name, filter(
lambda x: re.match('^[^](,]', x), names)))
names = list(filter(lambda n: not re.match(
r'^\s*et\.?\s+al\.?\s*', n, flags=re.IGNORECASE), names))
(names, author_list,
back_propagate_affiliations_to) = _collaboration_at_start(names)
(enumaffils) = _enum_collaboration_at_end(author_line)
patterns = [('double-prefix',
r'^(.*)\s+(' + PREFIX_MATCH + r')\s(' +
PREFIX_MATCH + r')\s(\S+)$'),
('name-prefix-name',
r'^(.*)\s+(' + PREFIX_MATCH + r')\s(\S+)$'),
('name-name-prefix',
r'^(.*)\s+(\S+)\s(I|II|III|IV|V|Sr|Jr|Sr\.|Jr\.)$'),
('name-name',
r'^(.*)\s+(\S+)$'), ]
for name in names:
pattern_matches = ((mtype, re.match(m, name, flags=re.IGNORECASE))
for (mtype, m) in patterns)
(mtype, match) = next(((mtype, m)
for (mtype, m) in pattern_matches
if m is not None), ('default', None))
if match is None:
author_entry = [name, '', '']
elif mtype == 'double-prefix':
s = '{} {} {}'.format(match.group(
2), match.group(3), match.group(4))
author_entry = [s, match.group(1), '']
elif mtype == 'name-prefix-name':
s = '{} {}'.format(match.group(2), match.group(3))
author_entry = [s, match.group(1), '']
elif mtype == 'name-name-prefix':
author_entry = [match.group(2), match.group(1), match.group(3)]
elif mtype == 'name-name':
author_entry = [match.group(2), match.group(1), '']
else:
author_entry = [name, '', '']
author_entry = _add_affiliation(
author_line, enumaffils, author_entry, name)
author_list.append(author_entry)
return {'author_list': author_list,
'back_prop': back_propagate_affiliations_to}
def parse_author_affil_utf(authors: str) -> List:
if not authors:
return []
return list(map(lambda author: list(map(tex2utf, author)),
parse_author_affil(authors)))
def _remove_double_commas(items: List[str]) -> List[str]:
parts: List[str] = []
last = ''
for pt in items:
if pt == ',' and last == ',':
continue
else:
parts.append(pt)
last = pt
return parts
def _tidy_name(name: str) -> str:
name = re.sub(r'\s\s+', ' ', name)
name = re.sub(r'(?<!\\)\.(\S)', r'. \g<1>', name)
return name
def _collaboration_at_start(names: List[str]) \
-> Tuple[List[str], List[List[str]], int]:
author_list = []
back_propagate_affiliations_to = 0
while len(names) > 0:
m = re.search(r'([a-z0-9\s]+\s+(collaboration|group|team))',
names[0], flags=re.IGNORECASE)
if not m:
break
author_list.append([m.group(1), '', ''])
back_propagate_affiliations_to += 1
names.pop(0)
if names and (names[0] == ',' or names[0] == ':'):
names.pop(0)
return names, author_list, back_propagate_affiliations_to
def _enum_collaboration_at_end(author_line: str)->Dict:
line_m = re.search(r'\(\s*\((.*)$', author_line)
if not line_m:
return {}
enumaffils = {}
affils = re.sub(r'\s*\)\s*$', '', line_m.group(1))
for affil in affils.split('('):
m = re.match(r'^(\d+)\)\s*(\S.*\S)\s*$', affil)
if m:
enumaffils[m.group(1)] = re.sub(r'[\.,\s]*$', '', m.group(2))
return enumaffils
def _add_affiliation(author_line: str,
enumaffils: Dict,
author_entry: List[str],
name: str) -> List:
en = re.escape(name)
namerex = r'{}\s*\(([^\(\)]+)'.format(en.replace(' ', 's*'))
m = re.search(namerex, author_line, flags=re.IGNORECASE)
if not m:
return author_entry
# Now see if we have enumerated references (just commas, digits, &, and)
affils = m.group(1).rstrip().lstrip()
affils = re.sub(r'(&|and)/,', ',', affils, flags=re.IGNORECASE)
if re.match(r'^[\d,\s]+$', affils):
for affil in affils.split(','):
if affil in enumaffils:
author_entry.append(enumaffils[affil])
else:
author_entry.append(affils)
return author_entry
def _parse_author_affil_back_propagate(author_list: List[List[str]],
back_prop: int) -> List[List[str]]:
last_affil: List[str] = []
for x in range(len(author_list) - 1, max(back_prop - 1, -1), -1):
author_entry = author_list[x]
if len(author_entry) > 3: # author has affiliation,store
last_affil = author_entry
elif last_affil:
# author doesn't have affil but later one did => copy
author_entry.extend(last_affil[3:])
return author_list
def split_authors(authors: str) -> List:
if not authors:
return []
aus = re.split(r'(\(|\))', authors)
aus = list(filter(lambda x: x != '', aus))
blocks = []
if len(aus) == 1:
blocks.append(authors)
else:
c = ''
depth = 0
for bit in aus:
if bit == '':
continue
if bit == '(':
depth += 1
if depth == 1:
blocks.append(c)
c = '('
else:
c = c + bit
elif bit == ')':
depth -= 1
c = c + bit
if depth == 0:
blocks.append(c)
c = ''
else:
continue
else:
c = c + bit
if c:
blocks.append(c)
listx = []
for block in blocks:
block = re.sub(r'\s+', ' ', block)
if re.match(r'^\(', block): # it is a comment
listx.append(block)
else: # it is a name
block = re.sub(r',?\s+(and|\&)\s', ',', block)
names = re.split(r'(,|:)\s*', block)
for name in names:
if not name:
continue
name = name.rstrip().lstrip()
if name:
listx.append(name)
# Recombine suffixes that were separated with a comma
parts: List[str] = []
for p in listx:
if re.match(r'^(Jr\.?|Sr\.?\[IV]{2,})$', p) \
and len(parts) >= 2 \
and parts[-1] == ',' \
and not re.match(r'\)$', parts[-2]):
separator = parts.pop()
last = parts.pop()
recomb = "{}{} {}".format(last, separator, p)
parts.append(recomb)
else:
parts.append(p)
return parts
def parse_authorline(authors: str) -> str:
names = parse_author_affil_utf(authors)
return '; '.join([', '.join([q for q in n[:2] if q]) for n in names])
def _parse_article_authors(article_author):
try:
return [article_author[0], parse_author_affil_utf(article_author[1])]
except Exception as e:
msg = "Author split failed for article {}".format(article_author[0])
logger.error(msg)
logger.exception(e)
return [article_author[0], '']
def parse_authorline_parallel(article_authors, n_processes=None):
logger.info(
'Parsing author lines for {} articles...'.format(len(article_authors))
)
pool = Pool(n_processes)
parsed = pool.map(_parse_article_authors, article_authors)
outdict = {aid: auth for aid, auth in parsed}
filename = os.path.join(DIR_OUTPUT, 'authors-parsed.json.gz')
logger.info('Saving to {}'.format(filename))
with gzip.open(filename, 'wb') as fout:
fout.write(json.dumps(outdict).encode('utf-8'))
| true | true |
f72fa578f87bc853943bee3e503ed704b0ffd3fc | 32,916 | py | Python | plugin/lighthouse/composer/shell.py | chubbymaggie/lighthouse | e6c494a0c8dd2aca09b71b981e8c0c03d9078cdd | [
"MIT"
] | 1 | 2017-10-27T23:02:29.000Z | 2017-10-27T23:02:29.000Z | plugin/lighthouse/composer/shell.py | yrp604/lighthouse | b92a25906fb2513d8bfc4454c41e6378984d9ad9 | [
"MIT"
] | null | null | null | plugin/lighthouse/composer/shell.py | yrp604/lighthouse | b92a25906fb2513d8bfc4454c41e6378984d9ad9 | [
"MIT"
] | null | null | null | from .parser import *
from lighthouse.util import *
#------------------------------------------------------------------------------
# Composing Shell
#------------------------------------------------------------------------------
class ComposingShell(QtWidgets.QWidget):
    """
    The ComposingShell UI for interactive coverage composition.

    This class ties together all the individual components that make up
    the Composing Shell, wrapping it up in a nice portable widget. This
    includes the label sitting at the head of the shell, the text box
    (the shell, a.k.a ComposingLine), and the composition parser.

    In theory, multiple ComposingShell objects could be instantiated and
    placed in various dialogs, forms, views, etc. These shells are fairly
    independent, but obviously must communicate with the director.

    The shell interprets user input as one of three things:
      * a Search query  (text starting with '/')
      * a Jump target   (a hex address or function name)
      * a Composition   (an expression such as '(A | B) - C')
    """

    def __init__(self, director, model, table=None):
        super(ComposingShell, self).__init__()
        self.setObjectName(self.__class__.__name__)

        # external entities
        self._director = director
        self._palette = director._palette
        self._model = model
        self._table = table

        # command / input state
        self._search_text = ""
        self._command_timer = QtCore.QTimer()

        # the last known (successfully parsed) user AST
        self._last_ast = None

        # composition parser related members
        self._parser = CompositionParser()
        self._parser_error = None
        self._parsed_tokens = []
        self._shorthand = []

        # configure the widget for use
        self._ui_init()

    #--------------------------------------------------------------------------
    # Properties
    #--------------------------------------------------------------------------

    @property
    def text(self):
        """
        The existing shell text.
        """
        return str(self._line.toPlainText())

    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------

    def _ui_init(self):
        """
        Initialize UI elements.
        """
        # initialize a monospace font to use with our widget(s)
        self._font = MonospaceFont()
        self._font_metrics = QtGui.QFontMetricsF(self._font)

        # initialize our ui elements
        self._ui_init_shell()
        self._ui_init_completer()
        self._ui_init_signals()
        self._ui_layout()

    def _ui_init_shell(self):
        """
        Initialize the shell UI elements.
        """
        # the composer label at the head of the shell
        self._line_label = QtWidgets.QLabel("Composer")
        self._line_label.setStyleSheet("QLabel { margin: 0 1ex 0 1ex }")
        self._line_label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
        self._line_label.setFont(self._font)
        self._line_label.setFixedWidth(self._line_label.sizeHint().width())

        # the text box / shell / ComposingLine
        self._line = ComposingLine()

        # configure the shell background & default text color
        palette = self._line.palette()
        palette.setColor(QtGui.QPalette.Base, self._palette.overview_bg)
        palette.setColor(QtGui.QPalette.Text, self._palette.composer_fg)
        palette.setColor(QtGui.QPalette.WindowText, self._palette.composer_fg)
        self._line.setPalette(palette)

    def _ui_init_completer(self):
        """
        Initialize the coverage hint UI elements.
        """
        # NOTE/COMPAT: QStringListModel lives in QtCore under PyQt5, and in
        # QtGui under the older bindings
        if using_pyqt5:
            self._completer_model = QtCore.QStringListModel([])
        else:
            self._completer_model = QtGui.QStringListModel([])
        self._completer = QtWidgets.QCompleter(self)
        self._completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
        self._completer.setModelSorting(QtWidgets.QCompleter.CaseInsensitivelySortedModel)
        self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self._completer.setModel(self._completer_model)
        self._completer.setWrapAround(False)
        self._completer.popup().setFont(self._font)
        self._completer.setWidget(self._line)

    def _ui_init_signals(self):
        """
        Connect UI signals.
        """
        # text changed in the shell
        self._line.textChanged.connect(self._ui_shell_text_changed)

        # cursor position changed in the shell
        self._line.cursorPositionChanged.connect(self._ui_shell_cursor_changed)

        # return key pressed in the shell
        self._line.returnPressed.connect(self._ui_shell_return_pressed)

        # register for cues from the director
        self._director.coverage_created(self._internal_refresh)
        self._director.coverage_deleted(self._internal_refresh)
        self._director.coverage_modified(self._internal_refresh)

        # register for cues from the model
        self._model.layoutChanged.connect(self._ui_shell_text_changed)

    def _ui_layout(self):
        """
        Layout the major UI elements of the widget.
        """
        # create a qt layout for the 'composer' (the shell)
        layout = QtWidgets.QHBoxLayout()
        layout.setContentsMargins(0,0,0,0)

        #
        # Shell Layout:
        #   [ [ 'Composer' ][ ComposingLine                  ... ] ]
        #

        layout.addWidget(self._line_label)
        layout.addWidget(self._line)

        # apply the widget layout
        self.setLayout(layout)

    #--------------------------------------------------------------------------
    # Refresh
    #--------------------------------------------------------------------------

    def refresh(self):
        """
        Public refresh of the shell.
        """
        self._internal_refresh()

    @idafast
    def _internal_refresh(self):
        """
        Internal refresh of the shell.
        """
        self._refresh_hint_list()

    def _refresh_hint_list(self):
        """
        Refresh the shell coverage hint contents.
        """
        # get the most recent coverage strings from the director
        detailed_strings = [self._director.get_coverage_string(x) for x in self._director.coverage_names]
        self._completer_model.setStringList(detailed_strings)
        # cache the shorthand symbols (the first character of each string)
        self._shorthand = [x[0] for x in detailed_strings]

        # queue a UI coverage hint if necessary
        self._ui_hint_coverage_refresh()

    #--------------------------------------------------------------------------
    # Signal Handlers
    #--------------------------------------------------------------------------

    def _ui_hint_tooltip(self, text, index):
        """
        Display a non-intrusive error tooltip to the user.

        `index` is the character offset into the shell text where the
        tooltip should be anchored (eg, a parse error location).
        """

        #
        # hide the coverage hint if it is visible. things can look cluttered
        # down by the shell if we're trying to show both.
        #

        self._ui_hint_coverage_hide()

        # create a cursor and move it to the parse error location on the shell
        cursor_tip = QtGui.QTextCursor(self._line.document())
        cursor_tip.setPosition(index)

        #
        # using our carefully positioned cursor, we can now extract the relative
        # pixel position of the parse error on the shell and map its global
        # (absolute) pixel position on the screen.
        #

        position = self._line.mapToGlobal(self._line.cursorRect(cursor_tip).topLeft())

        # draw the tooltip at the computed parse error position
        x = QtWidgets.QToolTip.showText(position, text)

    def _ui_shell_cursor_changed(self):
        """
        Cursor position changed in the shell.
        """
        self._ui_hint_coverage_refresh()

    def _ui_shell_text_changed(self):
        """
        Text changed in the shell.

        Dispatches the current shell text to the Search, Jump, or
        Composition handlers based on its shape.
        """
        text = self.text

        #
        # a Search, eg '/DnsParse_'
        #

        if self.is_search(text):
            self._execute_search(text)
            self._highlight_search()
            return

        # not a search query; clear any lingering filters for it
        else:
            self._model.filter_string("")

        #
        # a Jump, eg '0x804010a' or 'sub_1400016F0'
        #

        if self.is_jump(text) and self._table:
            self._line_label.setText("Jump")
            self._highlight_jump()
            return

        #
        # a Composition, eg '(A | B) - C'
        #

        self._execute_composition(text)
        self._highlight_composition()
        self._ui_hint_coverage_refresh()

    def _ui_shell_return_pressed(self):
        """
        Return / Enter pressed in the shell.

        The user pressed 'enter' in the shell, this means we want to try
        and save their composition as a new coverage set to the director.
        """
        text = self.text

        # a search query has no accept state, nothing to do
        if self.is_search(text):
            return

        # jump to the function entry containing the requested address
        if self.is_jump(text) and self._table:
            self._execute_jump(text)
            return

        # attempt to save the user crafted composition
        self._accept_composition()

    #--------------------------------------------------------------------------
    # Search
    #--------------------------------------------------------------------------

    @staticmethod
    def is_search(text):
        """
        Check if a string (text) looks like a search query.

        A search query is used to filter functions listed in the coverage
        overview table based on their name.

        eg: text = '/DnsParse_'
        """
        return (text and text[0] == "/")

    def _execute_search(self, text):
        """
        Execute the search semantics.
        """
        # strip the leading '/' so only the actual query text remains
        self._search_text = text[1:]

        #
        # if the user input is only "/" (starting to type something), hint
        # that they are entering the Search mode. nothing else to do!
        #

        if text == "/":
            self._line_label.setText("Search")
            return

        #
        # stop an existing command timer if there is one running. we are about
        # to schedule a new one or execute inline. so the old/deferred command
        # is no longer needed.
        #

        self._command_timer.stop()

        #
        # if the functions list is HUGE, we want to defer the filtering until
        # we think the user has stopped typing as each pass may take awhile
        # to compute (while blocking the main thread...)
        #

        if self._director.metadata.is_big():
            self._command_timer = singleshot(1000, self._execute_search_internal)
            self._command_timer.start()

        #
        # the database is not *massive*, let's execute the search immediately
        #

        else:
            self._execute_search_internal()

        # done
        return

    def _execute_search_internal(self):
        """
        Execute the actual search filtering & coverage metrics.
        """
        # the given text is a real search query, apply it as a filter now
        self._model.filter_string(self._search_text)

        # compute coverage % of the visible (filtered) results
        percent = self._model.get_modeled_coverage_percent()

        # show the coverage % of the search results in the shell label
        self._line_label.setText("%1.2f%%" % percent)

    def _highlight_search(self):
        """
        Syntax highlight a search query.
        """
        self._line.setUpdatesEnabled(False)
        ################# UPDATES DISABLED #################

        # clear any existing text colors
        self._color_clear()

        # color search based on if there are any matching results
        if self._model.rowCount():
            self._color_text(self._palette.valid_text, start=1)
        else:
            self._color_text(self._palette.invalid_text, start=1)

        ################# UPDATES ENABLED #################
        self._line.setUpdatesEnabled(True)

        # done
        return

    #--------------------------------------------------------------------------
    # Jump
    #--------------------------------------------------------------------------

    def is_jump(self, text):
        """
        Check if a string (text) looks like a jump query.

        A jump query is used to jump to a function in the coverage overview
        table based on their address.

        eg: text = '0x8040100', or 'sub_1400016F0'
        """
        return self._compute_jump(text) != 0

    def _compute_jump(self, text):
        """
        Compute the function address destination of a jump target from a string.

        Returns 0 when the text does not resolve to a known function.

        eg: text = '0x8040100', or 'sub_8040100' --> jump to function 0x8040100
        """
        text = text.strip()

        #
        # if the user input is less than two characters, we automatically
        # dismiss it as a valid jump target. the primary reasons for this
        # is to avoid possible shorthand parsing clashes.
        #
        #   eg: imagine the user has a valid function named 'A' that they want
        #   to jump to - well we actually choose to ignore that request here.
        #
        # We favor the importance of shorthand symbols as used in compositions.
        #

        if len(text) < 2:
            return 0

        #
        # attempt to convert the user input from a hex number eg '0x8040105'
        # to its corresponding function address validated by the director
        #

        try:
            address = int(text, 16)
        except ValueError:
            pass
        else:
            function_metadata = self._director.metadata.get_function(address)
            if function_metadata:
                return function_metadata.address

        #
        # the user string did not translate to a parsable hex number (address)
        # or the function it falls within could not be found in the director.
        #
        # attempt to convert the user input from a function name, eg 'main',
        # or 'sub_1400016F0' to a function address validated by the director.
        #

        # special case to make 'sub_*' prefixed user inputs case insensitive
        if text.lower().startswith("sub_"):
            text = "sub_" + text[4:].upper()

        # look up the text function name within the director's metadata
        function_metadata = self._director.metadata.get_function_by_name(text)
        if function_metadata:
            return function_metadata.address

        #
        # the user string did not translate to a function name that could
        # be found in the director.
        #

        # failure, the user input (text) isn't a jump ...
        return 0

    def _execute_jump(self, text):
        """
        Execute the jump semantics.
        """
        assert self._table

        # retrieve the jump target
        function_address = self._compute_jump(text)
        assert function_address

        # select the function entry in the coverage overview table
        self._table.selectRow(self._model.func2row[function_address])
        self._table.scrollTo(
            self._table.currentIndex(),
            QtWidgets.QAbstractItemView.PositionAtCenter
        )

    def _highlight_jump(self):
        """
        Syntax highlight a jump query.
        """
        self._line.setUpdatesEnabled(False)
        ################# UPDATES DISABLED #################

        # clear any existing text colors
        self._color_clear()

        # color jump
        self._color_text(self._palette.valid_text)

        ################# UPDATES ENABLED #################
        self._line.setUpdatesEnabled(True)

        # done
        return

    #--------------------------------------------------------------------------
    # Composition
    #--------------------------------------------------------------------------

    def _execute_composition(self, text):
        """
        Execute a composition query.
        """
        # reset the shell head text
        self._line_label.setText("Composer")

        # attempt to parse & execute a composition
        try:

            # clear any previous parse attempts/failures
            self._parser_error = None

            # attempt to parse the user input against the composition grammar
            self._parsed_tokens, ast = self._parser.parse(text, self._shorthand)

            # if the AST changed since the last parse, inform the director
            if not ast_equal(self._last_ast, ast):
                self._director.cache_composition(ast)

            # save the newly parsed ast
            self._last_ast = ast

        # parse failure
        except ParseError as e:
            self._parser_error = e

            #
            # even though we failed to generate an AST that can be evaluated
            # by the director, we still want to save the list of tokens parsed.
            # these tokens will still be used for basic syntax highlighting.
            #

            self._parsed_tokens = e.parsed_tokens

        # done
        return True

    def _highlight_composition(self):
        """
        Syntax highlight a composition.
        """
        self._line.setUpdatesEnabled(False)
        ################# UPDATES DISABLED #################

        # clear any existing text colors
        self._color_clear()

        # the parse failed, so there will be invalid text to highlight
        if self._parser_error:
            self._color_invalid()

        # paint any valid tokens
        self._color_tokens()

        ################# UPDATES ENABLED #################
        self._line.setUpdatesEnabled(True)

        # done
        return

    def _accept_composition(self):
        """
        Save the user crafted composition to the director.
        """

        #
        # if there's an existing parse error on the shell, there's nothing we
        # can do but pop a hint for the user and have them try again
        #

        if self._parser_error:
            self._ui_hint_tooltip("Invalid Composition", self._parser_error.error_index)
            return

        #
        # While the user is picking a name for the new composite, we might as
        # well try and cache it asynchronously :-). kick the caching off now.
        #

        self._director.cache_composition(self._last_ast, force=True)

        #
        # the user has entered a valid composition that we have parsed. we
        # want to save this to the director, but first we need a name for the
        # new composition. pop a simple dialog prompting the user for a
        # composition name
        #

        ok, coverage_name = prompt_string(
            "Composition Name:",
            "Please enter a name for this composition",
            "COMP_%s" % self.text
        )

        # the user did not enter a coverage name or hit cancel - abort the save
        if not (ok and coverage_name):
            return

        #
        # all good, ask the director to save the last composition
        # composition under the given coverage name
        #

        self._director.add_composition(coverage_name, self._last_ast)

        # switch to the newly created composition
        self._director.select_coverage(coverage_name)

    #--------------------------------------------------------------------------
    # Coverage Hint
    #--------------------------------------------------------------------------

    def _ui_hint_coverage_refresh(self):
        """
        Draw the coverage hint as applicable.
        """

        #
        # if the shell is not focused (or empty), don't bother to show a hint
        # as it frequently gets in the way and is really annoying...
        #

        if not (self._line.hasFocus() or self.text):
            return

        # scrape info from the current shell text state
        cursor_index = self._line.textCursor().position()
        text_token = self._get_cursor_coverage_token(cursor_index)

        #
        # if the user's text cursor is touching the index that produced the
        # parse error (assuming there was one) ...
        #

        if self._parser_error and self._parser_error.error_index == cursor_index:

            #
            # if the parse error indicates the parse failed because it expected
            # a coverage token but didn't get one, show the complete coverage
            # list so the user can see every available option.
            #

            if self._parser_error.expected == TokenCoverageSingle:
                self._ui_hint_coverage_show()

        #
        # if the user's text cursor is touching a valid coverage token, we want
        # to pop a hint that shows the details for the coverage matching that
        # explicit token / shorthand. It's a subtle convenience :-)
        #

        elif text_token and (text_token.type == "COVERAGE_TOKEN"):
            self._ui_hint_coverage_show(text_token.value)

        #
        # if the user's text cursor is not touching any text index of interest,
        # there's no reason for us to show any sort of hints. be sure any hints
        # are hidden.
        #

        else:
            self._ui_hint_coverage_hide()

        # done
        return

    def _ui_hint_coverage_show(self, prefix=''):
        """
        Show the coverage hint at the shell's cursor position.

        Optionally, one can specify a prefix (eg, the shorthand 'A') to
        limit the scope of coverage items hinted.
        """

        #
        # if the completer is already visible and showing the requested prefix,
        # then we have nothing to do. this will help mitigate refresh flickers
        #

        if self._completer.popup().isVisible() and \
            self._completer.completionPrefix() == prefix:
            return

        # if there was anything previously selected in the popup, clear it now
        self._completer.popup().clearSelection()

        # show only hints matching the given prefix
        #   eg: prefix = 'A' will show only entry 'A - 42.30% - drcov.8...'
        self._completer.setCompletionPrefix(prefix)

        # specify the position and size of the hint popup
        cr = self._line.cursorRect()
        cr.setWidth(self._completer.popup().sizeHintForColumn(0))

        # show the coverage hint popup
        self._completer.complete(cr)
        self._completer.popup().repaint() # reduces hint flicker on the Hot Shell

        # done
        return

    def _ui_hint_coverage_hide(self):
        """
        Hide the coverage hint.
        """
        self._completer.popup().hide()

    def _get_cursor_coverage_token(self, index):
        """
        Get the coverage token touching the cursor (if there is one).
        """
        # iterate through the list of parsed tokens on the line edit / shell
        for text_token in self._parsed_tokens:

            # skip any non-coverage text tokens
            if not text_token.type == "COVERAGE_TOKEN":
                continue

            # if this coverage text token touches our cursor, return it
            if text_token.span[0] <= index <= text_token.span[1]:
                return text_token

        # no coverage token on either side of the cursor
        return None

    #--------------------------------------------------------------------------
    # Composition Highlighting
    #--------------------------------------------------------------------------

    def _color_tokens(self):
        """
        Syntax highlight the valid composition tokens.
        """
        # more code-friendly, readable aliases
        TOKEN_COLORS = self._palette.TOKEN_COLORS

        #
        # in order to syntax highlight text of interest, we must use a text
        # cursor as the vehicle to move around the text box (shell) and
        # manipulate its contents (eg, painting colors)
        #
        # this is simply the way Qt exposes this functionality
        #

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # configure text formatting properties we want our cursor to apply
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold) # bolds text we 'type'

        #
        # we are about to start painting our text, but we want to disable the
        # shell from emitting any textChanged/cursorMoved kind of signals
        # that originate from our painting code.
        #
        # we use the blockSignals gateways below to disable/enable the signals
        # for the duration of our painting.
        #

        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################

        # iterate through every parsed token, and paint it
        for token in self._parsed_tokens:

            # if the palette doesn't define a color for this token, ignore it
            if token.type not in TOKEN_COLORS:
                continue

            # alias the start and end indexes of the text token to paint
            token_start, token_end = token.span

            # 'click' and 'drag' to select the token text
            cursor.setPosition(token_start, QtGui.QTextCursor.MoveAnchor)
            cursor.setPosition(token_end, QtGui.QTextCursor.KeepAnchor)

            # configure the colors/style for this explicit token
            #highlight.setBackground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
            highlight.setForeground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
            cursor.setCharFormat(highlight)

        #
        # we are done painting all the parsed tokens. let's restore the user
        # cursor back to its original state so they are none-the-wiser
        #

        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)

        # done
        return

    def _color_invalid(self):
        """
        Highlight the invalid (un-parsable) text.

        Please read through the _color_tokens() function for a more
        complete walkthrough of the text painting process.
        """
        assert self._parser_error

        # the invalid text starts from the token that caused a parse error
        invalid_start = self._parser_error.error_index
        invalid_text = self.text[invalid_start:]

        # no invalid text? nothing to highlight I guess!
        if not invalid_text:
            return

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # setup the invalid text highlighter
        invalid_color = self._palette.invalid_highlight
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold)
        highlight.setBackground(QtGui.QBrush(QtGui.QColor(invalid_color)))

        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################

        # select the invalid text
        cursor.setPosition(invalid_start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(len(self.text), QtGui.QTextCursor.KeepAnchor)

        # apply the highlight format to the selected (invalid) text
        cursor.setCharFormat(highlight)

        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)

        # done
        return

    #--------------------------------------------------------------------------
    # General Highlighting
    #--------------------------------------------------------------------------

    def _color_clear(self):
        """
        Clear any existing text colors.
        """
        self._color_text()

    def _color_text(self, color=None, start=0, end=0):
        """
        Color shell text with the given color.

        A color of None clears any existing coloring on the span.
        """
        # if no end was specified, apply the style till the end of input
        if end == 0:
            end = len(self.text)

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # setup a simple font coloring (or clearing) text format
        simple = QtGui.QTextCharFormat()
        if color:
            simple.setForeground(QtGui.QBrush(QtGui.QColor(color)))

        self._line.blockSignals(True)
        ################# UPDATES DISABLED #################

        # select the entire line
        cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)

        # set all the text to the simple format
        cursor.setCharFormat(simple)

        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################
        self._line.blockSignals(False)

        # done
        return
#------------------------------------------------------------------------------
# Composing Line
#------------------------------------------------------------------------------
class ComposingLine(QtWidgets.QPlainTextEdit):
    """
    The single-line textbox where user compositions are typed.

    A QLineEdit would be the natural widget for a one-line shell, but it
    has no support for syntax-highlighted (rich) text. QPlainTextEdit
    does, so we subclass it and constrain it to look and behave as much
    like a single-line QLineEdit as possible.
    """

    #
    # QLineEdit emits a 'returnPressed' signal when the user hits enter,
    # but QPlainTextEdit offers no equivalent. for consistency and the
    # same convenience, we declare the signal here and emit it ourselves
    # from keyPressEvent below.
    #

    returnPressed = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(ComposingLine, self).__init__(parent)
        self.setObjectName(self.__class__.__name__)

        # configure the widget for use
        self._ui_init()

    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------

    def _ui_init(self):
        """
        Initialize UI elements.
        """

        # use a monospaced font, consistent with the rest of the shell
        self._font = MonospaceFont()
        self._font_metrics = QtGui.QFontMetricsF(self._font)
        self.setFont(self._font)

        # strip away every multi-line affordance so this QPlainTextEdit
        # presents itself as a single-line text box
        self.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setTabChangesFocus(True)
        self.setMaximumBlockCount(1)

        # pin the widget height to a single line of text plus the
        # document margins (somewhat arbitrary math :D)
        padding = self.document().documentMargin() * 2
        self.setFixedHeight(self._font_metrics.height() + padding - 2)

    #--------------------------------------------------------------------------
    # QPlainTextEdit Overloads
    #--------------------------------------------------------------------------

    def keyPressEvent(self, e):
        """
        Overload of the key press event.

        Intercepts return/enter to emit returnPressed; all other keys are
        deferred to the default QPlainTextEdit handling.
        """
        if e.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter):

            #
            # fire our convenience signal to notify listeners that the user
            # pressed enter - they are probably trying to complete their
            # query / input.
            #

            self.returnPressed.emit()

            #
            # consume the keypress so it is not passed on to other
            # widgets/handlers, and no newline lands in the text box
            #

            e.accept()

        # business as usual
        else:
            super(ComposingLine, self).keyPressEvent(e)

    def timerEvent(self, e):
        """
        Stubbed out to prevent the QPlainTextEdit selection autoscroll.
        """
        return
| 32.981964 | 105 | 0.571363 | from .parser import *
from lighthouse.util import *
class ComposingShell(QtWidgets.QWidget):
def __init__(self, director, model, table=None):
super(ComposingShell, self).__init__()
self.setObjectName(self.__class__.__name__)
self._director = director
self._palette = director._palette
self._model = model
self._table = table
self._search_text = ""
self._command_timer = QtCore.QTimer()
self._last_ast = None
self._parser = CompositionParser()
self._parser_error = None
self._parsed_tokens = []
self._shorthand = []
self._ui_init()
@property
def text(self):
return str(self._line.toPlainText())
def _ui_init(self):
self._font = MonospaceFont()
self._font_metrics = QtGui.QFontMetricsF(self._font)
self._ui_init_shell()
self._ui_init_completer()
self._ui_init_signals()
self._ui_layout()
def _ui_init_shell(self):
self._line_label = QtWidgets.QLabel("Composer")
self._line_label.setStyleSheet("QLabel { margin: 0 1ex 0 1ex }")
self._line_label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self._line_label.setFont(self._font)
self._line_label.setFixedWidth(self._line_label.sizeHint().width())
self._line = ComposingLine()
palette = self._line.palette()
palette.setColor(QtGui.QPalette.Base, self._palette.overview_bg)
palette.setColor(QtGui.QPalette.Text, self._palette.composer_fg)
palette.setColor(QtGui.QPalette.WindowText, self._palette.composer_fg)
self._line.setPalette(palette)
def _ui_init_completer(self):
if using_pyqt5:
self._completer_model = QtCore.QStringListModel([])
else:
self._completer_model = QtGui.QStringListModel([])
self._completer = QtWidgets.QCompleter(self)
self._completer.setCompletionMode(QtWidgets.QCompleter.PopupCompletion)
self._completer.setModelSorting(QtWidgets.QCompleter.CaseInsensitivelySortedModel)
self._completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._completer.setModel(self._completer_model)
self._completer.setWrapAround(False)
self._completer.popup().setFont(self._font)
self._completer.setWidget(self._line)
def _ui_init_signals(self):
self._line.textChanged.connect(self._ui_shell_text_changed)
self._line.cursorPositionChanged.connect(self._ui_shell_cursor_changed)
self._line.returnPressed.connect(self._ui_shell_return_pressed)
self._director.coverage_created(self._internal_refresh)
self._director.coverage_deleted(self._internal_refresh)
self._director.coverage_modified(self._internal_refresh)
self._model.layoutChanged.connect(self._ui_shell_text_changed)
def _ui_layout(self):
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins(0,0,0,0)
layout.addWidget(self._line_label)
layout.addWidget(self._line)
self.setLayout(layout)
def refresh(self):
self._internal_refresh()
@idafast
def _internal_refresh(self):
self._refresh_hint_list()
def _refresh_hint_list(self):
detailed_strings = [self._director.get_coverage_string(x) for x in self._director.coverage_names]
self._completer_model.setStringList(detailed_strings)
self._shorthand = [x[0] for x in detailed_strings]
self._ui_hint_coverage_refresh()
def _ui_hint_tooltip(self, text, index):
#
self._ui_hint_coverage_hide()
# create a cursor and move it to the parse error location on the shell
cursor_tip = QtGui.QTextCursor(self._line.document())
cursor_tip.setPosition(index)
#
# using our carefully positioned cursor, we can now extract the relative
# pixel position of the parse error on the shell and map its global
# (absolute) pixel position on the screen.
#
position = self._line.mapToGlobal(self._line.cursorRect(cursor_tip).topLeft())
# draw the tooltip at the computed parse error position
x = QtWidgets.QToolTip.showText(position, text)
def _ui_shell_cursor_changed(self):
self._ui_hint_coverage_refresh()
def _ui_shell_text_changed(self):
text = self.text
#
# a Search, eg '/DnsParse_'
#
if self.is_search(text):
self._execute_search(text)
self._highlight_search()
return
# not a search query clear any lingering filters for it
else:
self._model.filter_string("")
#
# a Jump, eg '0x804010a' or 'sub_1400016F0'
#
if self.is_jump(text) and self._table:
self._line_label.setText("Jump")
self._highlight_jump()
return
#
# a Composition, eg '(A | B) - C'
#
self._execute_composition(text)
self._highlight_composition()
self._ui_hint_coverage_refresh()
def _ui_shell_return_pressed(self):
text = self.text
# a search query has no accept state, nothing to do
if self.is_search(text):
return
# jump to the function entry containing the requested address
if self.is_jump(text) and self._table:
self._execute_jump(text)
return
# attempt to save the user crafted composition
self._accept_composition()
#--------------------------------------------------------------------------
# Search
#--------------------------------------------------------------------------
@staticmethod
def is_search(text):
return (text and text[0] == "/")
def _execute_search(self, text):
self._search_text = text[1:]
#
# if the user input is only "/" (starting to type something), hint
# that they are entering the Search mode. nothing else to do!
#
if text == "/":
self._line_label.setText("Search")
return
#
# stop an existing command timer if there is one running. we are about
# to schedule a new one or execute inline. so the old/deferred command
# is no longer needed.
#
self._command_timer.stop()
#
# if the functions list is HUGE, we want to defer the filtering until
# we think the user has stopped typing as each pass may take awhile
# to compute (while blocking the main thread...)
#
if self._director.metadata.is_big():
self._command_timer = singleshot(1000, self._execute_search_internal)
self._command_timer.start()
#
# the database is not *massive*, let's execute the search immediately
else:
self._execute_search_internal()
return
def _execute_search_internal(self):
self._model.filter_string(self._search_text)
percent = self._model.get_modeled_coverage_percent()
self._line_label.setText("%1.2f%%" % percent)
def _highlight_search(self):
self._line.setUpdatesEnabled(False)
self._table.scrollTo(
self._table.currentIndex(),
QtWidgets.QAbstractItemView.PositionAtCenter
)
def _highlight_jump(self):
self._line.setUpdatesEnabled(False)
ady visible and showing the requested prefix,
# then we have nothing to do. this will help mitigate refresh flickers
#
if self._completer.popup().isVisible() and \
self._completer.completionPrefix() == prefix:
return
# if there was anything previously selected in the popup, clear it now
self._completer.popup().clearSelection()
# show only hints matching the given prefix
# eg: prefix = 'A' will show only entry 'A - 42.30% - drcov.8...'
self._completer.setCompletionPrefix(prefix)
# specify the position and size of the hint popup
cr = self._line.cursorRect()
cr.setWidth(self._completer.popup().sizeHintForColumn(0))
# show the coverage hint popup
self._completer.complete(cr)
self._completer.popup().repaint() # reduces hint flicker on the Hot Shell
# done
return
    def _ui_hint_coverage_hide(self):
        """
        Hide the coverage hint (autocomplete) popup.
        """
        self._completer.popup().hide()
def _get_cursor_coverage_token(self, index):
# iterate through the list of parsed tokens on the line edit / shell
for text_token in self._parsed_tokens:
# skip any non-coverage text tokens
if not text_token.type == "COVERAGE_TOKEN":
continue
# if this coverage text token touches our cursor, return it
if text_token.span[0] <= index <= text_token.span[1]:
return text_token
# no coverage token on either side of the cursor
return None
#--------------------------------------------------------------------------
# Composition Highlighting
#--------------------------------------------------------------------------
    def _color_tokens(self):
        """
        Bold + color every parsed token in the shell using a QTextCursor.
        """

        # more code-friendly, readable aliases
        TOKEN_COLORS = self._palette.TOKEN_COLORS

        #
        # in order to syntax highlight text of interest, we must use a text
        # cursor as the vehicle to move around the text box (shell) and
        # manipulate its contents (eg, painting colors)
        #
        # this is simply the way Qt exposes this functionality
        #

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # configure text formatting properties we want our cursor to apply
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold) # bolds text we 'type'

        #
        # we are about to start painting our text, but we want to disable the
        # shell from emitting any textChanged/cursorMoved kind of signals
        # that originate from our painting code.
        #
        # we use the blockSignals gateways below to disable/enable the signals
        # for the duration of our painting.
        #
        self._line.blockSignals(True)

        ################# UPDATES DISABLED #################

        # iterate through every parsed token, and paint it
        for token in self._parsed_tokens:

            # if the palette doesn't define a color for this token, ignore it
            if token.type not in TOKEN_COLORS:
                continue

            # select the token's span and apply the bold/colored format to it
            token_start, token_end = token.span
            cursor.setPosition(token_start, QtGui.QTextCursor.MoveAnchor)
            cursor.setPosition(token_end, QtGui.QTextCursor.KeepAnchor)
            highlight.setForeground(QtGui.QBrush(QtGui.QColor(TOKEN_COLORS[token.type])))
            cursor.setCharFormat(highlight)

        # cursor back to its original state so they are none-the-wiser
        #
        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################

        self._line.blockSignals(False)

        # done
        return
    def _color_invalid(self):
        """
        Highlight (bold + background color) the unparseable tail of the input.
        """
        assert self._parser_error

        # the invalid text starts from the token that caused a parse error
        invalid_start = self._parser_error.error_index
        invalid_text = self.text[invalid_start:]

        # no invalid text? nothing to highlight I guess!
        if not invalid_text:
            return

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # setup the invalid text highlighter
        invalid_color = self._palette.invalid_highlight
        highlight = QtGui.QTextCharFormat()
        highlight.setFontWeight(QtGui.QFont.Bold)
        highlight.setBackground(QtGui.QBrush(QtGui.QColor(invalid_color)))

        # suppress textChanged/cursorMoved style signals while we repaint
        self._line.blockSignals(True)

        ################# UPDATES DISABLED #################

        # select the invalid text
        cursor.setPosition(invalid_start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(len(self.text), QtGui.QTextCursor.KeepAnchor)

        # insert a highlighted version of the invalid text
        cursor.setCharFormat(highlight)

        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        cursor.setCharFormat(QtGui.QTextCharFormat())
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################

        self._line.blockSignals(False)

        # done
        return
#--------------------------------------------------------------------------
# General Highlighting
#--------------------------------------------------------------------------
    def _color_clear(self):
        """
        Clear all color/formatting from the shell text.
        """
        self._color_text()
    def _color_text(self, color=None, start=0, end=0):
        """
        Color (or clear, when *color* is None) the text span [start, end).

        An *end* of 0 means 'through the end of the current input'.
        """

        # if no end was specified, apply the style till the end of input
        if end == 0:
            end = len(self.text)

        # alias the user cursor, and save its original (current) position
        cursor = self._line.textCursor()
        cursor_position = cursor.position()

        # setup a simple font coloring (or clearing) text format
        simple = QtGui.QTextCharFormat()
        if color:
            simple.setForeground(QtGui.QBrush(QtGui.QColor(color)))

        # suppress editor signals while we repaint the selection
        self._line.blockSignals(True)

        ################# UPDATES DISABLED #################

        # select the entire line
        cursor.setPosition(start, QtGui.QTextCursor.MoveAnchor)
        cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)

        # set all the text to the simple format
        cursor.setCharFormat(simple)

        # reset the cursor position & style
        cursor.setPosition(cursor_position)
        self._line.setTextCursor(cursor)

        ################# UPDATES ENABLED #################

        self._line.blockSignals(False)

        # done
        return
#------------------------------------------------------------------------------
# Composing Line
#------------------------------------------------------------------------------
class ComposingLine(QtWidgets.QPlainTextEdit):
    """
    A single-line text edit, configured to look and act like a QLineEdit.
    """

    #
    # QLineEdit has a signal called 'returnPressed' which fires when the
    # user hits 'return' or 'enter'. This is a convenient signal, but
    # QPlainTextEdit does *not* have an equivalent.
    #
    # We define and fire this signal ourself for consistency and the same
    # conveniences as the one QLineEdit offers.
    #
    returnPressed = QtCore.pyqtSignal()

    def __init__(self, parent=None):
        super(ComposingLine, self).__init__(parent)
        self.setObjectName(self.__class__.__name__)

        # configure the widget for use
        self._ui_init()

    #--------------------------------------------------------------------------
    # Initialization - UI
    #--------------------------------------------------------------------------

    def _ui_init(self):
        """
        Configure fonts, wrapping and sizing so this widget behaves like a
        one-line text box.
        """

        # initialize a monospace font to use with our widget(s)
        self._font = MonospaceFont()
        self._font_metrics = QtGui.QFontMetricsF(self._font)
        self.setFont(self._font)

        # configure the QPlainTextEdit to appear and act as much like a
        # QLineEdit as possible (a single line text box)
        self.setWordWrapMode(QtGui.QTextOption.NoWrap)
        self.setLineWrapMode(QtWidgets.QPlainTextEdit.NoWrap)
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setTabChangesFocus(True)
        self.setMaximumBlockCount(1)

        # set the height of the textbox based on some arbitrary math :D
        LINE_PADDING = self.document().documentMargin()*2
        line_height = self._font_metrics.height() + LINE_PADDING - 2
        self.setFixedHeight(line_height)

    #--------------------------------------------------------------------------
    # QPlainTextEdit Overloads
    #--------------------------------------------------------------------------

    def keyPressEvent(self, e):
        """
        Intercept return/enter to emit returnPressed; forward everything else.
        """

        # trap the return/enter key event
        if e.key() == QtCore.Qt.Key_Return or \
           e.key() == QtCore.Qt.Key_Enter:

            #
            # fire our convenience signal notifying listerns that the user
            # pressed enter. this signal firing indicates the user is
            # probably trying to complete their query / input.
            #
            self.returnPressed.emit()

            #
            # now we must consume the keypress so it doesn't get passed on
            e.accept()
        else:
            super(ComposingLine, self).keyPressEvent(e)

    def timerEvent(self, e):
        # NOTE(review): all timer events are deliberately swallowed --
        # presumably to suppress QPlainTextEdit's internal timers (eg, the
        # blinking text cursor); confirm no other widget timers are needed.
        return
| true | true |
f72fa5c26a43311b6bb327f0a6c60d08da6bf7f5 | 122 | py | Python | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | jobbing/models_remote/__init__.py | davidall-amdocs/jobbing | b13311da07606366dfbe2eb737483a5820038557 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# flake8: noqa
from __future__ import absolute_import
from jobbing.models_remote.zip_code import ZipCode | 20.333333 | 50 | 0.819672 |
from __future__ import absolute_import
from jobbing.models_remote.zip_code import ZipCode | true | true |
f72fa5d84e135d78f70840c368783f3034e3d49b | 2,946 | py | Python | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/domain/AOIinfo.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AOIinfo(object):
    """Plain data holder for an AOI (Area Of Interest) record.

    Carries six optional fields (adcode, area, distance, id, location,
    name) and converts to/from the dict shape used by the Alipay OpenAPI
    via to_alipay_dict / from_alipay_dict. Falsy fields are omitted from
    the serialised dict, matching the generated-SDK convention.
    """

    # attribute names, which double as the wire-format dict keys
    _FIELDS = ('adcode', 'area', 'distance', 'id', 'location', 'name')

    def __init__(self):
        # every field starts out unset
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    # simple pass-through accessors for each backing attribute
    adcode = property(lambda self: self._adcode,
                      lambda self, value: setattr(self, '_adcode', value))
    area = property(lambda self: self._area,
                    lambda self, value: setattr(self, '_area', value))
    distance = property(lambda self: self._distance,
                        lambda self, value: setattr(self, '_distance', value))
    id = property(lambda self: self._id,
                  lambda self, value: setattr(self, '_id', value))
    location = property(lambda self: self._location,
                        lambda self, value: setattr(self, '_location', value))
    name = property(lambda self: self._name,
                    lambda self, value: setattr(self, '_name', value))

    def to_alipay_dict(self):
        """Serialise to the Alipay wire dict, skipping falsy fields."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # nested SDK objects serialise themselves
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AOIinfo from a wire dict; returns None for falsy input."""
        if not d:
            return None
        o = AOIinfo()
        for field in AOIinfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
return o
| 25.396552 | 67 | 0.5241 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AOIinfo(object):
    """Plain data holder for an AOI (Area Of Interest) record.

    Carries six optional fields (adcode, area, distance, id, location,
    name) and converts to/from the dict shape used by the Alipay OpenAPI
    via to_alipay_dict / from_alipay_dict. Falsy fields are omitted from
    the serialised dict, matching the generated-SDK convention.
    """

    # attribute names, which double as the wire-format dict keys
    _FIELDS = ('adcode', 'area', 'distance', 'id', 'location', 'name')

    def __init__(self):
        # every field starts out unset
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    # simple pass-through accessors for each backing attribute
    adcode = property(lambda self: self._adcode,
                      lambda self, value: setattr(self, '_adcode', value))
    area = property(lambda self: self._area,
                    lambda self, value: setattr(self, '_area', value))
    distance = property(lambda self: self._distance,
                        lambda self, value: setattr(self, '_distance', value))
    id = property(lambda self: self._id,
                  lambda self, value: setattr(self, '_id', value))
    location = property(lambda self: self._location,
                        lambda self, value: setattr(self, '_location', value))
    name = property(lambda self: self._name,
                    lambda self, value: setattr(self, '_name', value))

    def to_alipay_dict(self):
        """Serialise to the Alipay wire dict, skipping falsy fields."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # nested SDK objects serialise themselves
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AOIinfo from a wire dict; returns None for falsy input."""
        if not d:
            return None
        o = AOIinfo()
        for field in AOIinfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| true | true |
f72fa5dd5997ddce41d5321eaba142024cd484ab | 21,887 | py | Python | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | 1 | 2019-04-22T16:49:34.000Z | 2019-04-22T16:49:34.000Z | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | null | null | null | setup.py | pexip/os-mod-wsgi | 969aee194275c599dd769c645c080dceeea5639e | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import sys
import fnmatch
import subprocess
import tarfile
import shutil
import stat
import re
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from setuptools import setup
from distutils.core import Extension
from distutils.sysconfig import get_config_var as get_python_config
from distutils.sysconfig import get_python_lib
# Before anything else, this setup.py uses some tricks to potentially
# install Apache. This can be from a local tarball, or from precompiled
# Apache binaries for Heroku and OpenShift environments downloaded from
# Amazon S3. Once they are installed, then the installation of the
# mod_wsgi package itself will be triggered, ensuring that it can be
# built against the precompiled Apache binaries which were installed.
#
# First work out whether we are actually running on either Heroku or
# OpenShift. If we are, then we identify the set of precompiled binaries
# we are to use and copy it into the Python installation.
PREFIX = 'https://s3.amazonaws.com'
BUCKET = os.environ.get('MOD_WSGI_REMOTE_S3_BUCKET_NAME', 'modwsgi.org')
REMOTE_TARBALL_NAME = os.environ.get('MOD_WSGI_REMOTE_PACKAGES_NAME')
LOCAL_TARBALL_FILE = os.environ.get('MOD_WSGI_LOCAL_PACKAGES_FILE')
TGZ_OPENSHIFT='mod_wsgi-packages-openshift-centos6-apache-2.4.12-1.tar.gz'
TGZ_HEROKU='mod_wsgi-packages-heroku-cedar14-apache-2.4.12-1.tar.gz'
if not REMOTE_TARBALL_NAME and not LOCAL_TARBALL_FILE:
if os.environ.get('OPENSHIFT_HOMEDIR'):
REMOTE_TARBALL_NAME = TGZ_OPENSHIFT
elif os.path.isdir('/app/.heroku'):
REMOTE_TARBALL_NAME = TGZ_HEROKU
REMOTE_TARBALL_URL = None
if LOCAL_TARBALL_FILE is None and REMOTE_TARBALL_NAME:
REMOTE_TARBALL_URL = '%s/%s/%s' % (PREFIX, BUCKET, REMOTE_TARBALL_NAME)
WITH_TARBALL_PACKAGE = False
if REMOTE_TARBALL_URL or LOCAL_TARBALL_FILE:
WITH_TARBALL_PACKAGE = True
# If we are doing an install, download the tarball and unpack it into
# the 'packages' subdirectory. We will then add everything in that
# directory as package data so that it will be installed into the Python
# installation.
if WITH_TARBALL_PACKAGE:
if REMOTE_TARBALL_URL:
if not os.path.isfile(REMOTE_TARBALL_NAME):
print('Downloading', REMOTE_TARBALL_URL)
urlretrieve(REMOTE_TARBALL_URL, REMOTE_TARBALL_NAME+'.download')
os.rename(REMOTE_TARBALL_NAME+'.download', REMOTE_TARBALL_NAME)
LOCAL_TARBALL_FILE = REMOTE_TARBALL_NAME
if LOCAL_TARBALL_FILE:
shutil.rmtree('src/packages', ignore_errors=True)
tar = tarfile.open(LOCAL_TARBALL_FILE)
tar.extractall('src/packages')
tar.close()
open('src/packages/__init__.py', 'a').close()
package_files = []
for root, dirs, files in os.walk('src/packages', topdown=False):
for name in files:
path = os.path.join(root, name).split('/', 1)[1]
package_files.append(path)
print('adding ', path)
print('Running setup for Apache')
setup(name = 'mod_wsgi-packages',
version = '1.0.0',
packages = ['mod_wsgi', 'mod_wsgi.packages'],
package_dir = {'mod_wsgi': 'src'},
package_data = {'mod_wsgi': package_files},
)
# From this point on we will now actually install mod_wsgi. First we need
# to work out what all the available source code files are that should be
# compiled.
source_files = [os.path.join('src/server', name) for name in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'src/server')) if fnmatch.fnmatch(name, '*.c')]
# Work out all the Apache specific compilation flags. This is done using
# the standard Apache apxs command unless we are installing our own build
# of Apache. In that case we use Python code to do the equivalent of apxs
# as apxs will not work due to paths not matching where it was installed.
def find_program(names, default=None, paths=None):
    """Search PATH (plus extra *paths*) for the first existing program.

    Args:
        names: candidate program basenames, tried in order.
        default: value returned when no candidate can be found.
        paths: optional extra directories searched after the PATH entries.

    Returns:
        The joined path of the first candidate that exists, else *default*.
    """
    # 'paths=None' replaces the original mutable-default 'paths=[]'
    extra_paths = list(paths) if paths else []
    # use os.pathsep (':' on POSIX, ';' on Windows) -- the original
    # hard-coded ':' which silently broke PATH parsing on Windows, even
    # though this script explicitly supports Windows Apache builds.
    search_dirs = os.environ.get('PATH', '').split(os.pathsep) + extra_paths
    for name in names:
        for directory in search_dirs:
            program = os.path.join(directory, name)
            if os.path.exists(program):
                return program
    return default
APXS = os.environ.get('APXS')
WITH_HTTPD_PACKAGE = False
if APXS is None:
APXS = find_program(['mod_wsgi-apxs'],
paths=[os.path.dirname(sys.executable)])
if APXS is not None:
WITH_HTTPD_PACKAGE = True
if APXS is None:
APXS = find_program(['mod_wsgi-apxs', 'apxs2', 'apxs'],
'apxs', ['/usr/sbin', os.getcwd()])
elif not os.path.isabs(APXS):
APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])
WITHOUT_APXS = False
WITH_WINDOWS_APACHE = None
WITH_MACOSX_APACHE = None
if not WITH_TARBALL_PACKAGE:
if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
WITHOUT_APXS = True
if WITHOUT_APXS and os.name == 'nt':
APACHE_ROOTDIR = os.environ.get('MOD_WSGI_APACHE_ROOTDIR')
if APACHE_ROOTDIR:
if os.path.exists(APACHE_ROOTDIR):
WITH_WINDOWS_APACHE = APACHE_ROOTDIR
else:
raise RuntimeError('The Apache directory %r does not exist.' %
APACHE_ROOTDIR)
else:
if os.path.exists('c:\\Apache24'):
WITH_WINDOWS_APACHE = 'c:\\Apache24'
elif os.path.exists('c:\\Apache22'):
WITH_WINDOWS_APACHE = 'c:\\Apache22'
elif os.path.exists('c:\\Apache2'):
WITH_WINDOWS_APACHE = 'c:\\Apache2'
else:
raise RuntimeError('No Apache installation can be found. Set the '
'MOD_WSGI_APACHE_ROOTDIR environment to its location.')
elif WITHOUT_APXS and sys.platform == 'darwin':
WITH_MACOSX_APACHE = '/Applications/Xcode.app'
if WITHOUT_APXS and not WITH_WINDOWS_APACHE and not WITH_MACOSX_APACHE:
raise RuntimeError('The %r command appears not to be installed or '
'is not executable. Please check the list of prerequisites '
'in the documentation for this package and install any '
'missing Apache httpd server packages.' % APXS)
if WITH_WINDOWS_APACHE:
    # Windows: there is no working 'apxs' tool, so hard-code answers based
    # on the Apache root directory discovered (or configured) above.
    def get_apxs_config(name):
        """Return the apxs-style config value *name* for a Windows Apache."""
        if name == 'INCLUDEDIR':
            return WITH_WINDOWS_APACHE + '/include'
        elif name == 'LIBEXECDIR':
            return WITH_WINDOWS_APACHE + '/lib'
        else:
            return ''
    def get_apr_includes():
        # no separate APR include flags are needed on Windows
        return ''
    def get_apu_includes():
        return ''
elif WITH_MACOSX_APACHE:
    # MacOS X with only Xcode available: emulate the few apxs queries we
    # need using the well-known system Apache layout.
    def get_apxs_config(name):
        """Return the apxs-style config value *name* for the system Apache."""
        if name == 'BINDIR':
            return '/usr/bin'
        elif name == 'SBINDIR':
            return '/usr/sbin'
        elif name == 'LIBEXECDIR':
            return '/usr/libexec/apache2'
        elif name == 'PROGNAME':
            return 'httpd'
        elif name == 'SHLIBPATH_VAR':
            return 'DYLD_LIBRARY_PATH'
        else:
            return ''
    def get_apr_includes():
        return ''
    def get_apu_includes():
        return ''
elif WITH_TARBALL_PACKAGE:
    # Self-contained tarball install: apxs cannot be trusted (the install
    # was relocated), so parse Apache's generated config_vars.mk ourselves
    # and answer the apxs-style queries from that.
    SCRIPT_DIR = os.path.join(os.path.dirname(__file__), 'src', 'packages')
    CONFIG_FILE = os.path.join(SCRIPT_DIR, 'apache/build/config_vars.mk')
    CONFIG = {}
    with open(CONFIG_FILE) as fp:
        for line in fp.readlines():
            # each line is 'NAME = value'; keep everything after the first '='
            name, value = line.split('=', 1)
            name = name.strip()
            value = value.strip()
            CONFIG[name] = value
    # matches makefile-style variable references: $NAME, ${NAME} or $(NAME)
    _varprog = re.compile(r'\$(\w+|(?:\{[^}]*\}|\([^)]*\)))')
    def expand_vars(value):
        """Perform one substitution pass of $VAR/${VAR}/$(VAR) over *value*.

        Unknown variables are left untouched, and text substituted in is
        not re-scanned within this pass (callers loop to a fixed point).
        """
        if '$' not in value:
            return value
        i = 0
        while True:
            m = _varprog.search(value, i)
            if not m:
                break
            i, j = m.span(0)
            name = m.group(1)
            # strip the {...} or (...) wrapper, if any
            if name.startswith('{') and name.endswith('}'):
                name = name[1:-1]
            elif name.startswith('(') and name.endswith(')'):
                name = name[1:-1]
            if name in CONFIG:
                # splice in the replacement, resume scanning after it
                tail = value[j:]
                value = value[:i] + CONFIG.get(name, '')
                i = len(value)
                value += tail
            else:
                # unknown variable: skip past it
                i = j
        return value
    def get_apxs_config(name):
        """Resolve *name* from config_vars.mk, expanding vars to a fixed point."""
        value = CONFIG.get(name, '')
        sub_value = expand_vars(value)
        while value != sub_value:
            value = sub_value
            sub_value = expand_vars(value)
        # rebase baked-in build paths onto the unpacked package directory
        return sub_value.replace('/mod_wsgi-packages/', SCRIPT_DIR+'/')
    def get_apr_includes():
        return ''
    def get_apu_includes():
        return ''
    # pre-resolve the values used repeatedly below (lowercase keys come
    # from config_vars.mk; cache them under the uppercase names apxs uses)
    CONFIG['PREFIX'] = get_apxs_config('prefix')
    CONFIG['TARGET'] = get_apxs_config('target')
    CONFIG['SYSCONFDIR'] = get_apxs_config('sysconfdir')
    CONFIG['INCLUDEDIR'] = get_apxs_config('includedir')
    CONFIG['LIBEXECDIR'] = get_apxs_config('libexecdir')
    CONFIG['BINDIR'] = get_apxs_config('bindir')
    CONFIG['SBINDIR'] = get_apxs_config('sbindir')
    CONFIG['PROGNAME'] = get_apxs_config('progname')
else:
    # normal case: interrogate the real 'apxs' tool (and the apr/apu config
    # scripts it reports) by running them as subprocesses
    def get_apxs_config(query):
        """Return the stripped stdout of `apxs -q <query>`."""
        p = subprocess.Popen([APXS, '-q', query],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        # Python 3 pipes return bytes; normalise to text
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()
    def get_apr_includes():
        """Return `apr-config --includes`, or '' when apr-config is unknown."""
        if not APR_CONFIG:
            return ''
        p = subprocess.Popen([APR_CONFIG, '--includes'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()
    def get_apu_includes():
        """Return `apu-config --includes`, or '' when apu-config is unknown."""
        if not APU_CONFIG:
            return ''
        p = subprocess.Popen([APU_CONFIG, '--includes'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()
INCLUDEDIR = get_apxs_config('INCLUDEDIR')
CPPFLAGS = get_apxs_config('CPPFLAGS').split()
CFLAGS = get_apxs_config('CFLAGS').split()
EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()
APR_CONFIG = get_apxs_config('APR_CONFIG')
APU_CONFIG = get_apxs_config('APU_CONFIG')
# Make sure that 'apr-1-config' exists. If it doesn't we may be running
# on MacOS X Sierra, which has decided to not provide either it or the
# 'apu-1-config' script and otherwise completely broken 'apxs'. In that
# case we manually set the locations of the Apache and APR header files.
if (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
elif (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
else:
APR_INCLUDES = get_apr_includes().split()
APU_INCLUDES = get_apu_includes().split()
if not os.path.exists(APR_CONFIG) and not INCLUDEDIR:
if sys.platform == 'darwin':
# Likely no Xcode application installed or location of SDK in
# Xcode has changed with a new release of Xcode application.
raise RuntimeError('No Apache installation can be found, do you '
'have the full Apple Xcode installed. It is not enough to '
'have just the xcode command line tools installed.')
else:
# Set INCLUDEDIR just to avoid having an empty path. Probably
# should raise an exception here.
INCLUDEDIR = '/usr/include'
# Write out apxs_config.py which caches various configuration related to
# Apache. For the case of using our own Apache build, this needs to
# calculate values dynamically based on where binaries were installed.
# This is necessary as on OpenShift the virtual environment gets copied
# for each gear to a different path. We can't therefore rely on a hard
# coded path.
BINDIR = get_apxs_config('BINDIR')
SBINDIR = get_apxs_config('SBINDIR')
PROGNAME = get_apxs_config('PROGNAME')
MPM_NAME = get_apxs_config('MPM_NAME')
LIBEXECDIR = get_apxs_config('LIBEXECDIR')
SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')
APXS_CONFIG_TEMPLATE = """
import os
WITH_TARBALL_PACKAGE = %(WITH_TARBALL_PACKAGE)r
WITH_HTTPD_PACKAGE = %(WITH_HTTPD_PACKAGE)r
if WITH_HTTPD_PACKAGE:
from mod_wsgi_packages.httpd import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'modules')
SHLIBPATH = os.path.join(PACKAGES_ROOTDIR, 'lib')
elif WITH_TARBALL_PACKAGE:
from mod_wsgi.packages import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'modules')
SHLIBPATH = []
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr-util', 'lib'))
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr', 'lib'))
SHLIBPATH = ':'.join(SHLIBPATH)
else:
BINDIR = '%(BINDIR)s'
SBINDIR = '%(SBINDIR)s'
LIBEXECDIR = '%(LIBEXECDIR)s'
SHLIBPATH = ''
MPM_NAME = '%(MPM_NAME)s'
PROGNAME = '%(PROGNAME)s'
SHLIBPATH_VAR = '%(SHLIBPATH_VAR)s'
if os.path.exists(os.path.join(SBINDIR, PROGNAME)):
HTTPD = os.path.join(SBINDIR, PROGNAME)
elif os.path.exists(os.path.join(BINDIR, PROGNAME)):
HTTPD = os.path.join(BINDIR, PROGNAME)
else:
HTTPD = PROGNAME
if os.path.exists(os.path.join(SBINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(SBINDIR, 'rotatelogs')
elif os.path.exists(os.path.join(BINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(BINDIR, 'rotatelogs')
else:
ROTATELOGS = 'rotatelogs'
"""
with open(os.path.join(os.path.dirname(__file__),
'src/server/apxs_config.py'), 'w') as fp:
print(APXS_CONFIG_TEMPLATE % dict(
WITH_TARBALL_PACKAGE=WITH_TARBALL_PACKAGE,
WITH_HTTPD_PACKAGE=WITH_HTTPD_PACKAGE,
BINDIR=BINDIR, SBINDIR=SBINDIR, LIBEXECDIR=LIBEXECDIR,
MPM_NAME=MPM_NAME, PROGNAME=PROGNAME,
SHLIBPATH_VAR=SHLIBPATH_VAR), file=fp)
# Work out location of Python library and how to link it.
PYTHON_VERSION = get_python_config('VERSION')
if os.name == 'nt':
if hasattr(sys, 'real_prefix'):
PYTHON_LIBDIR = sys.real_prefix
else:
PYTHON_LIBDIR = get_python_config('BINDIR')
PYTHON_LDFLAGS = []
PYTHON_LDLIBS = ['%s/libs/python%s.lib' % (PYTHON_LIBDIR, PYTHON_VERSION),
'%s/lib/libhttpd.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapr-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libaprutil-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapriconv-1.lib' % WITH_WINDOWS_APACHE]
else:
PYTHON_LDVERSION = get_python_config('LDVERSION') or PYTHON_VERSION
PYTHON_LIBDIR = get_python_config('LIBDIR')
PYTHON_CFGDIR = get_python_lib(plat_specific=1, standard_lib=1) + '/config'
if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)
if not os.path.exists(PYTHON_CFGDIR):
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, sys.platform)
PYTHON_LDFLAGS = ['-L%s' % PYTHON_LIBDIR, '-L%s' % PYTHON_CFGDIR]
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]
if os.path.exists(os.path.join(PYTHON_LIBDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
if os.path.exists(os.path.join(PYTHON_CFGDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
# Create the final set of compilation flags to be used.
INCLUDE_DIRS = [INCLUDEDIR]
EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
CFLAGS + EXTRA_CFLAGS + APR_INCLUDES + APU_INCLUDES)
EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS
# Force adding of LD_RUN_PATH for platforms that may need it.
if os.name != 'nt':
LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
LD_RUN_PATH += ':%s:%s' % (PYTHON_LIBDIR, PYTHON_CFGDIR)
LD_RUN_PATH = LD_RUN_PATH.lstrip(':')
os.environ['LD_RUN_PATH'] = LD_RUN_PATH
# On MacOS X, recent versions of Apple's Apache do not support compiling
# Apache modules with a target older than 10.8. This is because it
# screws up Apache APR % formats for apr_time_t, which breaks daemon
# mode queue time. For the target to be 10.8 or newer for now if Python
# installation supports older versions. This means that things will not
# build for older MacOS X versions. Deal with these when they occur.
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if target is None:
target = get_python_config('MACOSX_DEPLOYMENT_TARGET')
if target:
target_version = tuple(map(int, target.split('.')))
#assert target_version >= (10, 8), \
# 'Minimum of 10.8 for MACOSX_DEPLOYMENT_TARGET'
if target_version < (10, 8):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.8'
# Now add the definitions to build everything.
if os.name == 'nt':
extension_name = 'mod_wsgi.server.mod_wsgi'
else:
extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]
extension = Extension(extension_name, source_files,
include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
extra_link_args=EXTRA_LINK_ARGS)
def _documentation():
result = []
prefix = 'docs/_build/html'
for root, dirs, files in os.walk(prefix, topdown=False):
for name in files:
if root == prefix:
result.append(os.path.join(root[len(prefix):], name))
else:
result.append(os.path.join(root[len(prefix)+1:], name))
return result
def _version():
path = 'src/server/wsgi_version.h'
pattern = r'#define MOD_WSGI_VERSION_STRING "(?P<version>[^"]*)"'
with open(path, 'r') as fp:
match = re.search(pattern, fp.read(), flags=re.MULTILINE)
return match.group('version')
# Final check to make sure a shared library for Python does actually
# exist. Warn if one doesn't as we really want a shared library.

SHARED_LIBRARY_WARNING = """
WARNING: The Python installation you are using does not appear to have
been installed with a shared library, or in the case of MacOS X, as a
framework. Where these are not present, the compilation of mod_wsgi may
fail, or if it does succeed, will result in extra memory being used by
all processes at run time as a result of the static library needing to
be loaded in its entirety to every process. It is highly recommended
that you reinstall the Python installation being used from source code,
supplying the '--enable-shared' option to the 'configure' script when
configuring the source code prior to building and installing it.
"""

if os.name != 'nt':
    if (not get_python_config('Py_ENABLE_SHARED') and
            not get_python_config('PYTHONFRAMEWORK')):
        print(SHARED_LIBRARY_WARNING)

# Now finally run distutils.

long_description = open('README.rst').read()

# NOTE(review): 'bugtrack_url' and 'download_url = None' are not standard
# setup() keywords on current setuptools — confirm they are still accepted.
setup(name = 'mod_wsgi',
    version = _version(),
    description = 'Installer for Apache/mod_wsgi.',
    long_description = long_description,
    author = 'Graham Dumpleton',
    author_email = 'Graham.Dumpleton@gmail.com',
    maintainer = 'Graham Dumpleton',
    maintainer_email = 'Graham.Dumpleton@gmail.com',
    url = 'http://www.modwsgi.org/',
    bugtrack_url = 'https://github.com/GrahamDumpleton/mod_wsgi/issues',
    license = 'Apache License, Version 2.0',
    platforms = [],
    download_url = None,
    classifiers = [
        'Development Status :: 6 - Mature',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: BSD',
        'Operating System :: POSIX :: Linux',
        'Operating System :: POSIX :: SunOS/Solaris',
        'Programming Language :: Python',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Server'
    ],
    keywords = 'mod_wsgi wsgi apache',
    packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management',
        'mod_wsgi.server.management.commands', 'mod_wsgi.docs',
        'mod_wsgi.images'],
    package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html',
        'mod_wsgi.images': 'images'},
    package_data = {'mod_wsgi.docs': _documentation(),
        'mod_wsgi.images': ['snake-whiskey.jpg']},
    ext_modules = [extension],
    entry_points = { 'console_scripts':
        ['mod_wsgi-express = mod_wsgi.server:main'],},
    zip_safe = False,
)
| 36.971284 | 160 | 0.670489 | from __future__ import print_function
import os
import sys
import fnmatch
import subprocess
import tarfile
import shutil
import stat
import re
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
from setuptools import setup
from distutils.core import Extension
from distutils.sysconfig import get_config_var as get_python_config
from distutils.sysconfig import get_python_lib
PREFIX = 'https://s3.amazonaws.com'
BUCKET = os.environ.get('MOD_WSGI_REMOTE_S3_BUCKET_NAME', 'modwsgi.org')
REMOTE_TARBALL_NAME = os.environ.get('MOD_WSGI_REMOTE_PACKAGES_NAME')
LOCAL_TARBALL_FILE = os.environ.get('MOD_WSGI_LOCAL_PACKAGES_FILE')
TGZ_OPENSHIFT='mod_wsgi-packages-openshift-centos6-apache-2.4.12-1.tar.gz'
TGZ_HEROKU='mod_wsgi-packages-heroku-cedar14-apache-2.4.12-1.tar.gz'
if not REMOTE_TARBALL_NAME and not LOCAL_TARBALL_FILE:
if os.environ.get('OPENSHIFT_HOMEDIR'):
REMOTE_TARBALL_NAME = TGZ_OPENSHIFT
elif os.path.isdir('/app/.heroku'):
REMOTE_TARBALL_NAME = TGZ_HEROKU
REMOTE_TARBALL_URL = None
if LOCAL_TARBALL_FILE is None and REMOTE_TARBALL_NAME:
REMOTE_TARBALL_URL = '%s/%s/%s' % (PREFIX, BUCKET, REMOTE_TARBALL_NAME)
WITH_TARBALL_PACKAGE = False
if REMOTE_TARBALL_URL or LOCAL_TARBALL_FILE:
WITH_TARBALL_PACKAGE = True
if WITH_TARBALL_PACKAGE:
if REMOTE_TARBALL_URL:
if not os.path.isfile(REMOTE_TARBALL_NAME):
print('Downloading', REMOTE_TARBALL_URL)
urlretrieve(REMOTE_TARBALL_URL, REMOTE_TARBALL_NAME+'.download')
os.rename(REMOTE_TARBALL_NAME+'.download', REMOTE_TARBALL_NAME)
LOCAL_TARBALL_FILE = REMOTE_TARBALL_NAME
if LOCAL_TARBALL_FILE:
shutil.rmtree('src/packages', ignore_errors=True)
tar = tarfile.open(LOCAL_TARBALL_FILE)
tar.extractall('src/packages')
tar.close()
open('src/packages/__init__.py', 'a').close()
package_files = []
for root, dirs, files in os.walk('src/packages', topdown=False):
for name in files:
path = os.path.join(root, name).split('/', 1)[1]
package_files.append(path)
print('adding ', path)
print('Running setup for Apache')
setup(name = 'mod_wsgi-packages',
version = '1.0.0',
packages = ['mod_wsgi', 'mod_wsgi.packages'],
package_dir = {'mod_wsgi': 'src'},
package_data = {'mod_wsgi': package_files},
)
source_files = [os.path.join('src/server', name) for name in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
'src/server')) if fnmatch.fnmatch(name, '*.c')]
def find_program(names, default=None, paths=None):
    """Search PATH (plus any extra *paths*) for the first of *names*
    that exists and return its full path, else *default*.

    NOTE(review): this tests os.path.exists(), not executability —
    confirm a non-executable file of the same name is acceptable.
    """
    # Fixed: the original used a mutable default argument (paths=[]).
    # None is now the sentinel; callers passing a list are unaffected.
    extra_paths = list(paths) if paths else []
    for name in names:
        for directory in os.environ['PATH'].split(':') + extra_paths:
            candidate = os.path.join(directory, name)
            if os.path.exists(candidate):
                return candidate
    return default
APXS = os.environ.get('APXS')
WITH_HTTPD_PACKAGE = False
if APXS is None:
APXS = find_program(['mod_wsgi-apxs'],
paths=[os.path.dirname(sys.executable)])
if APXS is not None:
WITH_HTTPD_PACKAGE = True
if APXS is None:
APXS = find_program(['mod_wsgi-apxs', 'apxs2', 'apxs'],
'apxs', ['/usr/sbin', os.getcwd()])
elif not os.path.isabs(APXS):
APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])
WITHOUT_APXS = False
WITH_WINDOWS_APACHE = None
WITH_MACOSX_APACHE = None
if not WITH_TARBALL_PACKAGE:
if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
WITHOUT_APXS = True
if WITHOUT_APXS and os.name == 'nt':
APACHE_ROOTDIR = os.environ.get('MOD_WSGI_APACHE_ROOTDIR')
if APACHE_ROOTDIR:
if os.path.exists(APACHE_ROOTDIR):
WITH_WINDOWS_APACHE = APACHE_ROOTDIR
else:
raise RuntimeError('The Apache directory %r does not exist.' %
APACHE_ROOTDIR)
else:
if os.path.exists('c:\\Apache24'):
WITH_WINDOWS_APACHE = 'c:\\Apache24'
elif os.path.exists('c:\\Apache22'):
WITH_WINDOWS_APACHE = 'c:\\Apache22'
elif os.path.exists('c:\\Apache2'):
WITH_WINDOWS_APACHE = 'c:\\Apache2'
else:
raise RuntimeError('No Apache installation can be found. Set the '
'MOD_WSGI_APACHE_ROOTDIR environment to its location.')
elif WITHOUT_APXS and sys.platform == 'darwin':
WITH_MACOSX_APACHE = '/Applications/Xcode.app'
if WITHOUT_APXS and not WITH_WINDOWS_APACHE and not WITH_MACOSX_APACHE:
raise RuntimeError('The %r command appears not to be installed or '
'is not executable. Please check the list of prerequisites '
'in the documentation for this package and install any '
'missing Apache httpd server packages.' % APXS)
if WITH_WINDOWS_APACHE:
def get_apxs_config(name):
if name == 'INCLUDEDIR':
return WITH_WINDOWS_APACHE + '/include'
elif name == 'LIBEXECDIR':
return WITH_WINDOWS_APACHE + '/lib'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_MACOSX_APACHE:
def get_apxs_config(name):
if name == 'BINDIR':
return '/usr/bin'
elif name == 'SBINDIR':
return '/usr/sbin'
elif name == 'LIBEXECDIR':
return '/usr/libexec/apache2'
elif name == 'PROGNAME':
return 'httpd'
elif name == 'SHLIBPATH_VAR':
return 'DYLD_LIBRARY_PATH'
else:
return ''
def get_apr_includes():
return ''
def get_apu_includes():
return ''
elif WITH_TARBALL_PACKAGE:
SCRIPT_DIR = os.path.join(os.path.dirname(__file__), 'src', 'packages')
CONFIG_FILE = os.path.join(SCRIPT_DIR, 'apache/build/config_vars.mk')
CONFIG = {}
with open(CONFIG_FILE) as fp:
for line in fp.readlines():
name, value = line.split('=', 1)
name = name.strip()
value = value.strip()
CONFIG[name] = value
_varprog = re.compile(r'\$(\w+|(?:\{[^}]*\}|\([^)]*\)))')
def expand_vars(value):
    """Expand $NAME, ${NAME} and $(NAME) references in *value* using the
    CONFIG mapping parsed from Apache's config_vars.mk.

    Performs a single left-to-right pass; the caller (get_apxs_config)
    re-applies this function until the result stops changing, so nested
    references introduced by a substitution are eventually resolved.
    """
    if '$' not in value:
        return value
    i = 0
    while True:
        m = _varprog.search(value, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        # Strip the ${...} or $(...) wrapper down to the bare name.
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        elif name.startswith('(') and name.endswith(')'):
            name = name[1:-1]
        if name in CONFIG:
            # Splice the replacement over the matched span, then continue
            # scanning from the end of the replacement text.
            tail = value[j:]
            value = value[:i] + CONFIG.get(name, '')
            i = len(value)
            value += tail
        else:
            # Unknown variable: leave the reference in place, skip past it.
            i = j
    return value
def get_apxs_config(name):
value = CONFIG.get(name, '')
sub_value = expand_vars(value)
while value != sub_value:
value = sub_value
sub_value = expand_vars(value)
return sub_value.replace('/mod_wsgi-packages/', SCRIPT_DIR+'/')
def get_apr_includes():
return ''
def get_apu_includes():
return ''
CONFIG['PREFIX'] = get_apxs_config('prefix')
CONFIG['TARGET'] = get_apxs_config('target')
CONFIG['SYSCONFDIR'] = get_apxs_config('sysconfdir')
CONFIG['INCLUDEDIR'] = get_apxs_config('includedir')
CONFIG['LIBEXECDIR'] = get_apxs_config('libexecdir')
CONFIG['BINDIR'] = get_apxs_config('bindir')
CONFIG['SBINDIR'] = get_apxs_config('sbindir')
CONFIG['PROGNAME'] = get_apxs_config('progname')
else:
def get_apxs_config(query):
    """Return the value of an ``apxs -q <query>`` lookup as stripped text."""
    p = subprocess.Popen([APXS, '-q', query],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    # On Python 3 the pipe yields bytes; normalise to str.
    if isinstance(out, bytes):
        out = out.decode('UTF-8')
    return out.strip()

def get_apr_includes():
    """Return the include flags reported by ``apr-config --includes``,
    or '' when no apr-config script was found."""
    if not APR_CONFIG:
        return ''
    p = subprocess.Popen([APR_CONFIG, '--includes'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if isinstance(out, bytes):
        out = out.decode('UTF-8')
    return out.strip()

def get_apu_includes():
    """Return the include flags reported by ``apu-config --includes``,
    or '' when no apu-config script was found."""
    if not APU_CONFIG:
        return ''
    p = subprocess.Popen([APU_CONFIG, '--includes'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    if isinstance(out, bytes):
        out = out.decode('UTF-8')
    return out.strip()
INCLUDEDIR = get_apxs_config('INCLUDEDIR')
CPPFLAGS = get_apxs_config('CPPFLAGS').split()
CFLAGS = get_apxs_config('CFLAGS').split()
EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()
APR_CONFIG = get_apxs_config('APR_CONFIG')
APU_CONFIG = get_apxs_config('APU_CONFIG')
# on MacOS X Sierra, which has decided to not provide either it or the
# 'apu-1-config' script and otherwise completely broken 'apxs'. In that
# case we manually set the locations of the Apache and APR header files.
if (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
elif (not os.path.exists(APR_CONFIG) and
os.path.exists('/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk')):
INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apache2'
APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apr-1']
APU_INCLUDES = []
else:
APR_INCLUDES = get_apr_includes().split()
APU_INCLUDES = get_apu_includes().split()
if not os.path.exists(APR_CONFIG) and not INCLUDEDIR:
if sys.platform == 'darwin':
# Likely no Xcode application installed or location of SDK in
# Xcode has changed with a new release of Xcode application.
raise RuntimeError('No Apache installation can be found, do you '
'have the full Apple Xcode installed. It is not enough to '
'have just the xcode command line tools installed.')
else:
# Set INCLUDEDIR just to avoid having an empty path. Probably
# should raise an exception here.
INCLUDEDIR = '/usr/include'
# Write out apxs_config.py which caches various configuration related to
# Apache. For the case of using our own Apache build, this needs to
# calculate values dynamically based on where binaries were installed.
# This is necessary as on OpenShift the virtual environment gets copied
# for each gear to a different path. We can't therefore rely on a hard
BINDIR = get_apxs_config('BINDIR')
SBINDIR = get_apxs_config('SBINDIR')
PROGNAME = get_apxs_config('PROGNAME')
MPM_NAME = get_apxs_config('MPM_NAME')
LIBEXECDIR = get_apxs_config('LIBEXECDIR')
SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')
APXS_CONFIG_TEMPLATE = """
import os
WITH_TARBALL_PACKAGE = %(WITH_TARBALL_PACKAGE)r
WITH_HTTPD_PACKAGE = %(WITH_HTTPD_PACKAGE)r
if WITH_HTTPD_PACKAGE:
from mod_wsgi_packages.httpd import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'modules')
SHLIBPATH = os.path.join(PACKAGES_ROOTDIR, 'lib')
elif WITH_TARBALL_PACKAGE:
from mod_wsgi.packages import __file__ as PACKAGES_ROOTDIR
PACKAGES_ROOTDIR = os.path.dirname(PACKAGES_ROOTDIR)
BINDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'bin')
SBINDIR = BINDIR
LIBEXECDIR = os.path.join(PACKAGES_ROOTDIR, 'apache', 'modules')
SHLIBPATH = []
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr-util', 'lib'))
SHLIBPATH.append(os.path.join(PACKAGES_ROOTDIR, 'apr', 'lib'))
SHLIBPATH = ':'.join(SHLIBPATH)
else:
BINDIR = '%(BINDIR)s'
SBINDIR = '%(SBINDIR)s'
LIBEXECDIR = '%(LIBEXECDIR)s'
SHLIBPATH = ''
MPM_NAME = '%(MPM_NAME)s'
PROGNAME = '%(PROGNAME)s'
SHLIBPATH_VAR = '%(SHLIBPATH_VAR)s'
if os.path.exists(os.path.join(SBINDIR, PROGNAME)):
HTTPD = os.path.join(SBINDIR, PROGNAME)
elif os.path.exists(os.path.join(BINDIR, PROGNAME)):
HTTPD = os.path.join(BINDIR, PROGNAME)
else:
HTTPD = PROGNAME
if os.path.exists(os.path.join(SBINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(SBINDIR, 'rotatelogs')
elif os.path.exists(os.path.join(BINDIR, 'rotatelogs')):
ROTATELOGS = os.path.join(BINDIR, 'rotatelogs')
else:
ROTATELOGS = 'rotatelogs'
"""
with open(os.path.join(os.path.dirname(__file__),
'src/server/apxs_config.py'), 'w') as fp:
print(APXS_CONFIG_TEMPLATE % dict(
WITH_TARBALL_PACKAGE=WITH_TARBALL_PACKAGE,
WITH_HTTPD_PACKAGE=WITH_HTTPD_PACKAGE,
BINDIR=BINDIR, SBINDIR=SBINDIR, LIBEXECDIR=LIBEXECDIR,
MPM_NAME=MPM_NAME, PROGNAME=PROGNAME,
SHLIBPATH_VAR=SHLIBPATH_VAR), file=fp)
PYTHON_VERSION = get_python_config('VERSION')
if os.name == 'nt':
if hasattr(sys, 'real_prefix'):
PYTHON_LIBDIR = sys.real_prefix
else:
PYTHON_LIBDIR = get_python_config('BINDIR')
PYTHON_LDFLAGS = []
PYTHON_LDLIBS = ['%s/libs/python%s.lib' % (PYTHON_LIBDIR, PYTHON_VERSION),
'%s/lib/libhttpd.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapr-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libaprutil-1.lib' % WITH_WINDOWS_APACHE,
'%s/lib/libapriconv-1.lib' % WITH_WINDOWS_APACHE]
else:
PYTHON_LDVERSION = get_python_config('LDVERSION') or PYTHON_VERSION
PYTHON_LIBDIR = get_python_config('LIBDIR')
PYTHON_CFGDIR = get_python_lib(plat_specific=1, standard_lib=1) + '/config'
if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)
if not os.path.exists(PYTHON_CFGDIR):
PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, sys.platform)
PYTHON_LDFLAGS = ['-L%s' % PYTHON_LIBDIR, '-L%s' % PYTHON_CFGDIR]
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]
if os.path.exists(os.path.join(PYTHON_LIBDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
if os.path.exists(os.path.join(PYTHON_CFGDIR,
'libpython%s.a' % PYTHON_VERSION)):
PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]
INCLUDE_DIRS = [INCLUDEDIR]
EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
CFLAGS + EXTRA_CFLAGS + APR_INCLUDES + APU_INCLUDES)
EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS
if os.name != 'nt':
LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
LD_RUN_PATH += ':%s:%s' % (PYTHON_LIBDIR, PYTHON_CFGDIR)
LD_RUN_PATH = LD_RUN_PATH.lstrip(':')
os.environ['LD_RUN_PATH'] = LD_RUN_PATH
# Apache modules with a target older than 10.8. This is because it
# screws up Apache APR % formats for apr_time_t, which breaks daemon
# mode queue time. For the target to be 10.8 or newer for now if Python
# installation supports older versions. This means that things will not
# build for older MacOS X versions. Deal with these when they occur.
if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if target is None:
target = get_python_config('MACOSX_DEPLOYMENT_TARGET')
if target:
target_version = tuple(map(int, target.split('.')))
#assert target_version >= (10, 8), \
# 'Minimum of 10.8 for MACOSX_DEPLOYMENT_TARGET'
if target_version < (10, 8):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.8'
# Now add the definitions to build everything.
if os.name == 'nt':
extension_name = 'mod_wsgi.server.mod_wsgi'
else:
extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]
extension = Extension(extension_name, source_files,
include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
extra_link_args=EXTRA_LINK_ARGS)
def _documentation():
result = []
prefix = 'docs/_build/html'
for root, dirs, files in os.walk(prefix, topdown=False):
for name in files:
if root == prefix:
result.append(os.path.join(root[len(prefix):], name))
else:
result.append(os.path.join(root[len(prefix)+1:], name))
return result
def _version():
path = 'src/server/wsgi_version.h'
pattern = r'
with open(path, 'r') as fp:
match = re.search(pattern, fp.read(), flags=re.MULTILINE)
return match.group('version')
# Final check to make sure a shared library for Python does actually
# exist. Warn if one doesn't as we really want a shared library.
SHARED_LIBRARY_WARNING = """
WARNING: The Python installation you are using does not appear to have
been installed with a shared library, or in the case of MacOS X, as a
framework. Where these are not present, the compilation of mod_wsgi may
fail, or if it does succeed, will result in extra memory being used by
all processes at run time as a result of the static library needing to
be loaded in its entirety to every process. It is highly recommended
that you reinstall the Python installation being used from source code,
supplying the '--enable-shared' option to the 'configure' script when
configuring the source code prior to building and installing it.
"""
if os.name != 'nt':
if (not get_python_config('Py_ENABLE_SHARED') and
not get_python_config('PYTHONFRAMEWORK')):
print(SHARED_LIBRARY_WARNING)
# Now finally run distutils.
long_description = open('README.rst').read()
setup(name = 'mod_wsgi',
version = _version(),
description = 'Installer for Apache/mod_wsgi.',
long_description = long_description,
author = 'Graham Dumpleton',
author_email = 'Graham.Dumpleton@gmail.com',
maintainer = 'Graham Dumpleton',
maintainer_email = 'Graham.Dumpleton@gmail.com',
url = 'http://www.modwsgi.org/',
bugtrack_url = 'https://github.com/GrahamDumpleton/mod_wsgi/issues',
license = 'Apache License, Version 2.0',
platforms = [],
download_url = None,
classifiers = [
'Development Status :: 6 - Mature',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server'
],
keywords = 'mod_wsgi wsgi apache',
packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management',
'mod_wsgi.server.management.commands', 'mod_wsgi.docs',
'mod_wsgi.images'],
package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html',
'mod_wsgi.images': 'images'},
package_data = {'mod_wsgi.docs': _documentation(),
'mod_wsgi.images': ['snake-whiskey.jpg']},
ext_modules = [extension],
entry_points = { 'console_scripts':
['mod_wsgi-express = mod_wsgi.server:main'],},
zip_safe = False,
)
| true | true |
f72fa655c2b497532a44793ccd4680c179b00048 | 4,942 | py | Python | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | zhiliao/filebrowsersafe/fields.py | gladgod/zhiliao | 573dfbe56734388c9657cb6749d267f4a8885d5b | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import super
# coding: utf-8
# imports
import os
import datetime
# django imports
from django.db import models
from django import forms
from django.core.files.storage import default_storage
from django.forms.widgets import Input
from django.db.models.fields import Field
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
# filebrowser imports
from .settings import *
from .base import FileObject
from .functions import url_to_path, get_directory
from future.utils import with_metaclass
class FileBrowseWidget(Input):
    """Text <input> widget that hooks up the FileBrowser file picker
    and ensures the configured upload directory exists on render."""

    input_type = 'text'

    class Media:
        # JavaScript that wires the browse control to the picker popup.
        js = (os.path.join(URL_FILEBROWSER_MEDIA, 'js/AddFileBrowser.js'), )

    def __init__(self, attrs=None):
        # Fixed: attrs was dereferenced with .get() before the None
        # check, so the default attrs=None raised AttributeError.
        # Normalise it up front; behaviour for dict callers is unchanged.
        if attrs is None:
            attrs = {}
        self.directory = attrs.get('directory', '')
        self.extensions = attrs.get('extensions', '')
        self.format = attrs.get('format', '')
        self.attrs = attrs.copy()

    def render(self, name, value, attrs=None):
        """Render the widget, creating the target storage directory if
        it does not yet exist."""
        if value is None:
            value = ""
        directory = self.directory
        if self.directory:
            if callable(self.directory):
                directory = self.directory()
            # The directory string may embed strftime() placeholders.
            directory = os.path.normpath(datetime.datetime.now().strftime(directory))
            fullpath = os.path.join(get_directory(), directory)
            if not default_storage.isdir(fullpath):
                default_storage.makedirs(fullpath)
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        final_attrs['search_icon'] = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
        final_attrs['directory'] = directory
        final_attrs['extensions'] = self.extensions
        final_attrs['format'] = self.format
        final_attrs['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
        final_attrs['DEBUG'] = DEBUG
        if value != "":
            # Best effort: value may be a plain string rather than a
            # FileObject, in which case path_relative_directory is absent.
            try:
                final_attrs['directory'] = os.path.split(value.path_relative_directory)[0]
            except Exception:  # was a bare except; keep best-effort semantics
                pass
        return render_to_string("filebrowser/custom_field.html", dict(locals(), MEDIA_URL=MEDIA_URL))
class FileBrowseFormField(forms.CharField):
    """Form field that validates the chosen file path against an
    optional whitelist of file extensions."""

    widget = FileBrowseWidget
    default_error_messages = {
        'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
    }

    def __init__(self, max_length=None, min_length=None,
            directory=None, extensions=None, format=None,
            *args, **kwargs):
        self.max_length, self.min_length = max_length, min_length
        self.directory = directory
        self.extensions = extensions
        # Fixed: self.format was only assigned when a truthy format was
        # supplied, leaving the attribute undefined otherwise (a latent
        # AttributeError). Always define it; '' when no format given.
        self.format = format or ''
        if format:
            # A named format implies its configured extension whitelist
            # unless an explicit extensions list was passed.
            self.extensions = extensions or EXTENSIONS.get(format)
        super(FileBrowseFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Run CharField cleaning, then reject disallowed extensions."""
        value = super(FileBrowseFormField, self).clean(value)
        if value == '':
            return value
        # Strip any query string before inspecting the extension.
        file_extension = os.path.splitext(value)[1].lower().split("?")[0]
        if self.extensions and file_extension not in self.extensions:
            raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions)})
        return value
class FileBrowseField(with_metaclass(models.SubfieldBase, Field)):
    """Model field storing a file path chosen via the FileBrowser;
    values are exposed to Python as FileObject instances."""

    def __init__(self, *args, **kwargs):
        self.directory = kwargs.pop('directory', '')
        self.extensions = kwargs.pop('extensions', '')
        self.format = kwargs.pop('format', '')
        return super(FileBrowseField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert a stored URL/path into a FileObject (pass through
        empty values and already-converted objects)."""
        if not value or isinstance(value, FileObject):
            return value
        return FileObject(url_to_path(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        # Persist the FileObject as its string representation.
        if value is None:
            return None
        return smart_str(value)

    def get_manipulator_field_objs(self):
        # NOTE(review): 'oldforms' is not imported anywhere in this
        # module — this legacy hook would raise NameError if called.
        return [oldforms.TextField]

    def get_internal_type(self):
        # Stored in the database as a plain CharField.
        return "CharField"

    def formfield(self, **kwargs):
        """Build a FileBrowseFormField/FileBrowseWidget pair carrying
        this field's directory/extensions/format configuration."""
        attrs = {}
        attrs["directory"] = self.directory
        attrs["extensions"] = self.extensions
        attrs["format"] = self.format
        defaults = {
            'form_class': FileBrowseFormField,
            'widget': FileBrowseWidget(attrs=attrs),
            'directory': self.directory,
            'extensions': self.extensions,
            'format': self.format
        }
        defaults.update(kwargs)
        return super(FileBrowseField, self).formfield(**defaults)
# Register this custom field with South (the legacy Django schema
# migration tool) so its migrations can introspect FileBrowseField.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^filebrowsersafe\.fields\.FileBrowseField"])
except ImportError:
    # South is not installed; nothing to register.
    pass
| 35.3 | 138 | 0.653784 | from __future__ import unicode_literals
from future.builtins import str
from future.builtins import super
import os
import datetime
from django.db import models
from django import forms
from django.core.files.storage import default_storage
from django.forms.widgets import Input
from django.db.models.fields import Field
from django.template.loader import render_to_string
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from .settings import *
from .base import FileObject
from .functions import url_to_path, get_directory
from future.utils import with_metaclass
class FileBrowseWidget(Input):
    """Text <input> widget that hooks up the FileBrowser file picker."""

    input_type = 'text'

    class Media:
        # JavaScript that wires the browse control to the picker popup.
        js = (os.path.join(URL_FILEBROWSER_MEDIA, 'js/AddFileBrowser.js'), )

    def __init__(self, attrs=None):
        # NOTE(review): attrs is dereferenced with .get() before the
        # None check below, so the default attrs=None would raise
        # AttributeError — confirm callers always pass a dict.
        self.directory = attrs.get('directory', '')
        self.extensions = attrs.get('extensions', '')
        self.format = attrs.get('format', '')
        if attrs is not None:
            self.attrs = attrs.copy()
        else:
            self.attrs = {}

    def render(self, name, value, attrs=None):
        """Render the widget, creating the target storage directory if
        it does not yet exist."""
        if value is None:
            value = ""
        directory = self.directory
        if self.directory:
            if callable(self.directory):
                directory = self.directory()
            # The directory string may embed strftime() placeholders.
            directory = os.path.normpath(datetime.datetime.now().strftime(directory))
            fullpath = os.path.join(get_directory(), directory)
            if not default_storage.isdir(fullpath):
                default_storage.makedirs(fullpath)
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        final_attrs['search_icon'] = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
        final_attrs['directory'] = directory
        final_attrs['extensions'] = self.extensions
        final_attrs['format'] = self.format
        final_attrs['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
        final_attrs['DEBUG'] = DEBUG
        if value != "":
            # Best effort: value is presumably a FileObject; plain
            # strings lack path_relative_directory and fall through.
            try:
                final_attrs['directory'] = os.path.split(value.path_relative_directory)[0]
            except:
                pass
        return render_to_string("filebrowser/custom_field.html", dict(locals(), MEDIA_URL=MEDIA_URL))
class FileBrowseFormField(forms.CharField):
    """Form field that validates the chosen file path against an
    optional whitelist of file extensions."""

    widget = FileBrowseWidget
    default_error_messages = {
        'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
    }

    def __init__(self, max_length=None, min_length=None,
            directory=None, extensions=None, format=None,
            *args, **kwargs):
        self.max_length, self.min_length = max_length, min_length
        self.directory = directory
        self.extensions = extensions
        # NOTE(review): self.format is only assigned when format is
        # truthy; later access on a formatless field would raise
        # AttributeError — confirm intended.
        if format:
            self.format = format or ''
            # A named format implies its configured extension whitelist
            # unless an explicit extensions list was passed.
            self.extensions = extensions or EXTENSIONS.get(format)
        super(FileBrowseFormField, self).__init__(*args, **kwargs)

    def clean(self, value):
        """Run CharField cleaning, then reject disallowed extensions."""
        value = super(FileBrowseFormField, self).clean(value)
        if value == '':
            return value
        # Strip any query string before inspecting the extension.
        file_extension = os.path.splitext(value)[1].lower().split("?")[0]
        if self.extensions and not file_extension in self.extensions:
            raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions)})
        return value
class FileBrowseField(with_metaclass(models.SubfieldBase, Field)):
    """Model field storing a file path chosen via the FileBrowser;
    values are exposed to Python as FileObject instances."""

    def __init__(self, *args, **kwargs):
        self.directory = kwargs.pop('directory', '')
        self.extensions = kwargs.pop('extensions', '')
        self.format = kwargs.pop('format', '')
        return super(FileBrowseField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert a stored URL/path into a FileObject (pass through
        empty values and already-converted objects)."""
        if not value or isinstance(value, FileObject):
            return value
        return FileObject(url_to_path(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        # Persist the FileObject as its string representation.
        if value is None:
            return None
        return smart_str(value)

    def get_manipulator_field_objs(self):
        # NOTE(review): 'oldforms' is not imported anywhere in this
        # module — this legacy hook would raise NameError if called.
        return [oldforms.TextField]

    def get_internal_type(self):
        # Stored in the database as a plain CharField.
        return "CharField"

    def formfield(self, **kwargs):
        """Build a FileBrowseFormField/FileBrowseWidget pair carrying
        this field's directory/extensions/format configuration."""
        attrs = {}
        attrs["directory"] = self.directory
        attrs["extensions"] = self.extensions
        attrs["format"] = self.format
        defaults = {
            'form_class': FileBrowseFormField,
            'widget': FileBrowseWidget(attrs=attrs),
            'directory': self.directory,
            'extensions': self.extensions,
            'format': self.format
        }
        defaults.update(kwargs)
        return super(FileBrowseField, self).formfield(**defaults)
# Register this custom field with South (the legacy Django schema
# migration tool) so its migrations can introspect FileBrowseField.
try:
    from south.modelsinspector import add_introspection_rules
    add_introspection_rules([], ["^filebrowsersafe\.fields\.FileBrowseField"])
except ImportError:
    # South is not installed; nothing to register.
    pass
| true | true |
f72fa985b72bc3ee85306c46c1ed7734c3a40686 | 2,607 | py | Python | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | 2 | 2022-01-10T06:53:11.000Z | 2022-01-10T07:41:51.000Z | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | null | null | null | bahamutexporter/core/service.py | Yooootsuba/bahamut-exporter | 1fcf95f7eca86709ece1ed1d2704c540731682d5 | [
"MIT"
] | null | null | null | import re
import html
import json
import requests
from bs4 import BeautifulSoup
class BamahutExporterService:
    """Scrapes the floors (posts) and inline replies of a Bahamut
    (forum.gamer.com.tw) thread.

    Note: the class name misspells "Bahamut"; kept for compatibility.
    """

    def __init__(self):
        # One shared Session gives connection reuse and carries the
        # browser-like user agent on every request.
        session = requests.Session()
        session.headers.update({'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'})
        self.session = session

    def is_last_page(self, page, response):
        """Return True once *page* exceeds the thread's final page
        number, which the page HTML embeds in a JS "var args" blob."""
        final_page = int(re.search('var args =.*page=([0-9]+)', response.text).group(1))
        return page > final_page

    def parse_replies(self, bsn, snB):
        """Fetch the inline replies for floor *snB* of board *bsn*,
        returned in reversed (chronological) order."""
        payload = self.session.get('https://forum.gamer.com.tw/ajax/moreCommend.php', params = {'bsn': bsn, 'snB': snB}).json()
        # Drop the pagination cursor; the remaining values are replies.
        payload.pop('next_snC')
        collected = [
            {
                'username' : reply['userid'],
                'nickname' : reply['nick'],
                'datetime' : reply['wtime'],
                'content' : reply['content'],
                'comment' : html.escape('{"content":"%s"}' % reply['content']),
            }
            for reply in payload.values()
        ]
        collected.reverse()
        return collected

    def parse_floor(self, bsn, floor):
        """Extract one floor's fields from its <section> element; a
        hidden/deleted floor (with a 'hint' div) yields a stub dict."""
        hint = floor.find('div', {'class': 'hint'})
        if hint is not None:
            return {
                'floor' : floor.find('div', {'class': 'floor'}).text,
                'hint' : hint.text,
            }
        return {
            'floor' : floor.find('a', {'class': 'floor tippy-gpbp'}).text,
            'username' : floor.find('a', {'class': 'userid'}).text,
            'nickname' : floor.find('a', {'class': 'username'}).text,
            'datetime' : floor.find('a', {'class': 'edittime tippy-post-info'}).get('data-mtime'),
            'content' : floor.find('div', {'class': 'c-article__content'}),
            'replies' : self.parse_replies(bsn, floor.get('id').replace('post_', '')),
        }

    def export(self, bsn, snA):
        """Walk thread *snA* of board *bsn* page by page and return the
        parsed floors."""
        floors = []
        page = 1
        while True:
            response = self.session.get('https://forum.gamer.com.tw/C.php', params = {'bsn': bsn, 'snA': snA, 'page': page})
            soup = BeautifulSoup(response.text, 'html.parser')
            # Stop once we have walked past the thread's final page.
            if self.is_last_page(page, response):
                return floors
            floors.extend(
                self.parse_floor(bsn, section)
                for section in soup.find_all('section', {'class': 'c-section', 'id': re.compile('.*')})
            )
            page += 1
| 35.22973 | 159 | 0.513617 | import re
import html
import json
import requests
from bs4 import BeautifulSoup
class BamahutExporterService:
    """Scrapes the floors (posts) and inline replies of a Bahamut
    (forum.gamer.com.tw) thread.

    Note: the class name misspells "Bahamut"; kept for compatibility.
    """

    def __init__(self):
        # One shared Session gives connection reuse and carries the
        # browser-like user agent on every request.
        self.session = requests.Session()
        self.session.headers.update({'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'})

    def is_last_page(self, page, response):
        # The page HTML embeds the thread's final page number in a JS
        # "var args" blob; past it means we are done.
        return page > int(re.search('var args =.*page=([0-9]+)', response.text).group(1))

    def parse_replies(self, bsn, snB):
        """Fetch the inline replies for floor *snB* of board *bsn*,
        returned in reversed order (presumably chronological — the
        endpoint appears to serve newest-first; verify)."""
        replies = []
        response = self.session.get('https://forum.gamer.com.tw/ajax/moreCommend.php', params = {'bsn': bsn, 'snB': snB}).json()
        # Drop the pagination cursor; the remaining values are replies.
        response.pop('next_snC')
        for reply in response.values():
            replies.append(
                {
                    'username' : reply['userid'],
                    'nickname' : reply['nick'],
                    'datetime' : reply['wtime'],
                    'content' : reply['content'],
                    'comment' : html.escape('{"content":"%s"}' % reply['content']),
                }
            )
        replies.reverse()
        return replies

    def parse_floor(self, bsn, floor):
        """Extract one floor's fields from its <section> element; a
        hidden/deleted floor (with a 'hint' div) yields a stub dict."""
        if (hint := floor.find('div', {'class': 'hint'})) is not None:
            return {
                'floor' : floor.find('div', {'class': 'floor'}).text,
                'hint' : hint.text,
            }
        else:
            return {
                'floor' : floor.find('a', {'class': 'floor tippy-gpbp'}).text,
                'username' : floor.find('a', {'class': 'userid'}).text,
                'nickname' : floor.find('a', {'class': 'username'}).text,
                'datetime' : floor.find('a', {'class': 'edittime tippy-post-info'}).get('data-mtime'),
                'content' : floor.find('div', {'class': 'c-article__content'}),
                # The section id is 'post_<snB>'; strip the prefix to get
                # the reply-lookup key.
                'replies' : self.parse_replies(bsn, floor.get('id').replace('post_', '')),
            }

    def export(self, bsn, snA):
        """Walk thread *snA* of board *bsn* page by page and return the
        parsed floors."""
        page = 0
        floors = []
        while True:
            # Fetch the next page of the thread.
            page += 1
            response = self.session.get('https://forum.gamer.com.tw/C.php', params = {'bsn': bsn, 'snA': snA, 'page': page})
            soup = BeautifulSoup(response.text, 'html.parser')
            # Stop once we have walked past the thread's final page.
            if self.is_last_page(page, response):
                return floors
            # Every floor lives in a <section class="c-section" id="post_...">.
            for floor in soup.find_all('section', {'class': 'c-section', 'id': re.compile('.*')}):
                floors.append(self.parse_floor(bsn, floor))
| true | true |
f72faa1d19c3225e9a6701fb72dfb2c626ceba53 | 3,099 | py | Python | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | airflow/dags/udac_example_dag.py | aliiae/data-engineering | 96ee73a52de9504fc7e9eda748c90e0966c4fa03 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
StageToRedshiftOperator,
LoadFactOperator,
LoadDimensionOperator,
DataQualityOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from helpers import SqlQueries
REDSHIFT_CONN_ID = 'redshift'
AWS_CREDENTIALS_ID = 'aws_credentials'
INPUT_BUCKET = 'udacity-dend'
default_args = {
'owner': 'udacity',
'start_date': datetime(2018, 11, 1),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(seconds=300),
'catchup': False,
}
fact_table_name_and_query = ('songplays', SqlQueries.songplay_table_insert)
dim_tables_name_to_query = {
'users': SqlQueries.user_table_insert,
'songs': SqlQueries.song_table_insert,
'artists': SqlQueries.artist_table_insert,
'time': SqlQueries.time_table_insert,
}
dag = DAG(
'udac_example_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='@hourly',
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables = PostgresOperator(
task_id='Create_tables',
dag=dag,
postgres_conn_id=REDSHIFT_CONN_ID,
sql='/create_tables.sql',
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='log_data/{execution_date.year}/{execution_date.month}/',
table='staging_events',
file_format="JSON 's3://udacity-dend/log_json_path.json'",
provide_context=True,
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='song_data',
table='staging_songs',
file_format="JSON 'auto'",
provide_context=True,
)
load_songplays_table = LoadFactOperator(
task_id=f'Load_{fact_table_name_and_query[0]}_fact_table',
dag=dag,
table=fact_table_name_and_query[0],
conn_id=REDSHIFT_CONN_ID,
sql=fact_table_name_and_query[1],
)
dim_operators = [
LoadDimensionOperator(
task_id=f'Load_{dim_table_name}_dim_table',
dag=dag,
table=dim_table_name,
conn_id=REDSHIFT_CONN_ID,
sql=dim_query,
)
for dim_table_name, dim_query in dim_tables_name_to_query.items()
]
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
tables=list(dim_tables_name_to_query) + [fact_table_name_and_query[0]],
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables
create_tables >> [stage_events_to_redshift, stage_songs_to_redshift]
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
load_songplays_table >> dim_operators
dim_operators + [load_songplays_table] >> run_quality_checks
run_quality_checks >> end_operator
| 28.431193 | 75 | 0.757018 | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators import (
StageToRedshiftOperator,
LoadFactOperator,
LoadDimensionOperator,
DataQualityOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.postgres_operator import PostgresOperator
from helpers import SqlQueries
REDSHIFT_CONN_ID = 'redshift'
AWS_CREDENTIALS_ID = 'aws_credentials'
INPUT_BUCKET = 'udacity-dend'
default_args = {
'owner': 'udacity',
'start_date': datetime(2018, 11, 1),
'depends_on_past': False,
'retries': 3,
'retry_delay': timedelta(seconds=300),
'catchup': False,
}
fact_table_name_and_query = ('songplays', SqlQueries.songplay_table_insert)
dim_tables_name_to_query = {
'users': SqlQueries.user_table_insert,
'songs': SqlQueries.song_table_insert,
'artists': SqlQueries.artist_table_insert,
'time': SqlQueries.time_table_insert,
}
dag = DAG(
'udac_example_dag',
default_args=default_args,
description='Load and transform data in Redshift with Airflow',
schedule_interval='@hourly',
)
start_operator = DummyOperator(task_id='Begin_execution', dag=dag)
create_tables = PostgresOperator(
task_id='Create_tables',
dag=dag,
postgres_conn_id=REDSHIFT_CONN_ID,
sql='/create_tables.sql',
)
stage_events_to_redshift = StageToRedshiftOperator(
task_id='Stage_events',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='log_data/{execution_date.year}/{execution_date.month}/',
table='staging_events',
file_format="JSON 's3://udacity-dend/log_json_path.json'",
provide_context=True,
)
stage_songs_to_redshift = StageToRedshiftOperator(
task_id='Stage_songs',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
aws_credentials_id=AWS_CREDENTIALS_ID,
s3_bucket=INPUT_BUCKET,
s3_key='song_data',
table='staging_songs',
file_format="JSON 'auto'",
provide_context=True,
)
load_songplays_table = LoadFactOperator(
task_id=f'Load_{fact_table_name_and_query[0]}_fact_table',
dag=dag,
table=fact_table_name_and_query[0],
conn_id=REDSHIFT_CONN_ID,
sql=fact_table_name_and_query[1],
)
dim_operators = [
LoadDimensionOperator(
task_id=f'Load_{dim_table_name}_dim_table',
dag=dag,
table=dim_table_name,
conn_id=REDSHIFT_CONN_ID,
sql=dim_query,
)
for dim_table_name, dim_query in dim_tables_name_to_query.items()
]
run_quality_checks = DataQualityOperator(
task_id='Run_data_quality_checks',
dag=dag,
conn_id=REDSHIFT_CONN_ID,
tables=list(dim_tables_name_to_query) + [fact_table_name_and_query[0]],
)
end_operator = DummyOperator(task_id='Stop_execution', dag=dag)
start_operator >> create_tables
create_tables >> [stage_events_to_redshift, stage_songs_to_redshift]
[stage_events_to_redshift, stage_songs_to_redshift] >> load_songplays_table
load_songplays_table >> dim_operators
dim_operators + [load_songplays_table] >> run_quality_checks
run_quality_checks >> end_operator
| true | true |
f72faaed66845c4f79aa0c5322e4ebcb367d7bcc | 766 | py | Python | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | Misc/d3_heatmap.py | mahnooranjum/Python_Programming | ba251e0e855842112efeb968d06458c60eaf1bd3 | [
"MIT"
] | null | null | null | '''
Mahnoor Anjum
Python:
Trivariate Analysis
'''
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import random
from mpl_toolkits.mplot3d import Axes3D
# sns.set()
path = 'data/private/savepath/'
filename = 'v3_1'
genpath = 'data/private/gen/'
genname = 'g3_1'
data = pd.read_csv(path + filename+'.csv')
gen = pd.read_csv(genpath + genname + '.csv')
k = 50
data = data.sample(k)
x = data['x1']
y = data['x2']
z = data['x3']
fig = plt.figure(figsize=(20,20))
data = pd.DataFrame({'X': x, 'Y': y, 'Z': z})
data_pivoted = data.pivot("X", "Y", "Z")
ax = sns.heatmap(data_pivoted)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(str(k)+"_samples")
| 18.238095 | 45 | 0.66188 | import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import random
from mpl_toolkits.mplot3d import Axes3D
path = 'data/private/savepath/'
filename = 'v3_1'
genpath = 'data/private/gen/'
genname = 'g3_1'
data = pd.read_csv(path + filename+'.csv')
gen = pd.read_csv(genpath + genname + '.csv')
k = 50
data = data.sample(k)
x = data['x1']
y = data['x2']
z = data['x3']
fig = plt.figure(figsize=(20,20))
data = pd.DataFrame({'X': x, 'Y': y, 'Z': z})
data_pivoted = data.pivot("X", "Y", "Z")
ax = sns.heatmap(data_pivoted)
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_xticks([])
ax.set_yticks([])
ax.set_title(str(k)+"_samples")
| true | true |
f72fab45a2f4c772cc4437cfc0c167040cf1b405 | 1,591 | py | Python | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 1 | 2017-02-22T10:57:10.000Z | 2017-02-22T10:57:10.000Z | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 1 | 2018-12-14T14:30:57.000Z | 2018-12-14T14:30:57.000Z | processout/invoicerisk.py | processout/processout-python | f8311702fe2d392817dcb66347a7a13b9cd91e92 | [
"MIT"
] | 2 | 2018-12-13T23:08:11.000Z | 2018-12-30T19:52:31.000Z | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
import json
from processout.networking.request import Request
from processout.networking.response import Response
# The content of this file was automatically generated
class InvoiceRisk(object):
def __init__(self, client, prefill = None):
self._client = client
self._score = None
self._is_legit = None
if prefill != None:
self.fill_with_data(prefill)
@property
def score(self):
"""Get score"""
return self._score
@score.setter
def score(self, val):
"""Set score
Keyword argument:
val -- New score value"""
self._score = val
return self
@property
def is_legit(self):
"""Get is_legit"""
return self._is_legit
@is_legit.setter
def is_legit(self, val):
"""Set is_legit
Keyword argument:
val -- New is_legit value"""
self._is_legit = val
return self
def fill_with_data(self, data):
"""Fill the current object with the new values pulled from data
Keyword argument:
data -- The data from which to pull the new values"""
if "score" in data.keys():
self.score = data["score"]
if "is_legit" in data.keys():
self.is_legit = data["is_legit"]
return self
def to_json(self):
return {
"score": self.score,
"is_legit": self.is_legit,
}
| 23.057971 | 71 | 0.588938 | try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
import processout
import json
from processout.networking.request import Request
from processout.networking.response import Response
class InvoiceRisk(object):
def __init__(self, client, prefill = None):
self._client = client
self._score = None
self._is_legit = None
if prefill != None:
self.fill_with_data(prefill)
@property
def score(self):
return self._score
@score.setter
def score(self, val):
self._score = val
return self
@property
def is_legit(self):
return self._is_legit
@is_legit.setter
def is_legit(self, val):
self._is_legit = val
return self
def fill_with_data(self, data):
if "score" in data.keys():
self.score = data["score"]
if "is_legit" in data.keys():
self.is_legit = data["is_legit"]
return self
def to_json(self):
return {
"score": self.score,
"is_legit": self.is_legit,
}
| true | true |
f72fac17ae338185f2e1470376f1a6802595e44a | 1,514 | py | Python | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | null | null | null | jnpy/app/vnpy_webtrader/__init__.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | # The MIT License (MIT)
#
# Copyright (c) 2015-present, Xiaoyou Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pathlib import Path
from vnpy.trader.app import BaseApp
from .engine import WebEngine, APP_NAME
class WebTraderApp(BaseApp):
""""""
app_name = APP_NAME
app_module = __module__
app_path = Path(__file__).parent
display_name = "Web服务"
engine_class = WebEngine
widget_name = "WebManager"
icon_name = "web.ico"
| 37.85 | 81 | 0.738441 |
from pathlib import Path
from vnpy.trader.app import BaseApp
from .engine import WebEngine, APP_NAME
class WebTraderApp(BaseApp):
app_name = APP_NAME
app_module = __module__
app_path = Path(__file__).parent
display_name = "Web服务"
engine_class = WebEngine
widget_name = "WebManager"
icon_name = "web.ico"
| true | true |
f72facb0930186c23043da299ca86f58450425a0 | 3,448 | py | Python | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | 4 | 2021-12-25T10:17:13.000Z | 2022-03-03T03:29:07.000Z | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | null | null | null | src/plugins/radeky_bot/utils/bilibiliuploader/bilibiliuploader.py | Radekyspec/Radeky_bot | 24ee088026c7443723a5e9c72abfb512ca3b3327 | [
"MIT"
] | 1 | 2021-12-25T10:17:16.000Z | 2021-12-25T10:17:16.000Z | from . import core
from .util import cipher
from nonebot import logger
import json
class BilibiliUploader():
def __init__(self):
self.access_token = None
self.refresh_token = None
self.sid = None
self.mid = None
def login(self, username, password):
code, self.access_token, self.refresh_token, self.sid, self.mid, _ = core.login(username, password)
if code != 0: # success
logger.error("login fail, error code = {}".format(code))
def login_by_access_token(self, access_token, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
self.sid, self.mid, _ = core.login_by_access_token(access_token)
def login_by_access_token_file(self, file_name):
with open(file_name, "r") as f:
login_data = json.loads(f.read())
self.access_token = login_data["access_token"]
self.refresh_token = login_data["refresh_token"]
self.sid, self.mid, _ = core.login_by_access_token(self.access_token)
def save_login_data(self, file_name=None):
login_data = json.dumps(
{
"access_token": self.access_token,
"refresh_token": self.refresh_token
}
)
try:
with open(file_name, "w+") as f:
f.write(login_data)
finally:
return login_data
def upload(self,
parts,
copyright: int,
title: str,
tid: int,
tag: str,
desc: str,
source: str = '',
cover: str = '',
no_reprint: int = 0,
open_elec: int = 1,
max_retry: int = 5,
thread_pool_workers: int = 1):
return core.upload(self.access_token,
self.sid,
self.mid,
parts,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers)
def edit(self,
avid=None,
bvid=None,
parts=None,
insert_index=None,
copyright=None,
title=None,
tid=None,
tag=None,
desc=None,
source=None,
cover=None,
no_reprint=None,
open_elec=None,
max_retry: int = 5,
thread_pool_workers: int = 1):
if not avid and not bvid:
logger.warning("please provide avid or bvid")
return None, None
if not avid:
avid = cipher.bv2av(bvid)
if not isinstance(parts, list):
parts = [parts]
if type(avid) is str:
avid = int(avid)
core.edit_videos(
self.access_token,
self.sid,
self.mid,
avid,
bvid,
parts,
insert_index,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers
)
| 28.733333 | 107 | 0.476798 | from . import core
from .util import cipher
from nonebot import logger
import json
class BilibiliUploader():
def __init__(self):
self.access_token = None
self.refresh_token = None
self.sid = None
self.mid = None
def login(self, username, password):
code, self.access_token, self.refresh_token, self.sid, self.mid, _ = core.login(username, password)
if code != 0:
logger.error("login fail, error code = {}".format(code))
def login_by_access_token(self, access_token, refresh_token=None):
self.access_token = access_token
self.refresh_token = refresh_token
self.sid, self.mid, _ = core.login_by_access_token(access_token)
def login_by_access_token_file(self, file_name):
with open(file_name, "r") as f:
login_data = json.loads(f.read())
self.access_token = login_data["access_token"]
self.refresh_token = login_data["refresh_token"]
self.sid, self.mid, _ = core.login_by_access_token(self.access_token)
def save_login_data(self, file_name=None):
login_data = json.dumps(
{
"access_token": self.access_token,
"refresh_token": self.refresh_token
}
)
try:
with open(file_name, "w+") as f:
f.write(login_data)
finally:
return login_data
def upload(self,
parts,
copyright: int,
title: str,
tid: int,
tag: str,
desc: str,
source: str = '',
cover: str = '',
no_reprint: int = 0,
open_elec: int = 1,
max_retry: int = 5,
thread_pool_workers: int = 1):
return core.upload(self.access_token,
self.sid,
self.mid,
parts,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers)
def edit(self,
avid=None,
bvid=None,
parts=None,
insert_index=None,
copyright=None,
title=None,
tid=None,
tag=None,
desc=None,
source=None,
cover=None,
no_reprint=None,
open_elec=None,
max_retry: int = 5,
thread_pool_workers: int = 1):
if not avid and not bvid:
logger.warning("please provide avid or bvid")
return None, None
if not avid:
avid = cipher.bv2av(bvid)
if not isinstance(parts, list):
parts = [parts]
if type(avid) is str:
avid = int(avid)
core.edit_videos(
self.access_token,
self.sid,
self.mid,
avid,
bvid,
parts,
insert_index,
copyright,
title,
tid,
tag,
desc,
source,
cover,
no_reprint,
open_elec,
max_retry,
thread_pool_workers
)
| true | true |
f72fad071798fd320fcd248a5514555dad9f2802 | 1,006 | py | Python | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | netvisor_api_client/requests/product.py | ajmyyra/netvisor-api-client | b12f90606b8f66580873a1ff16737fa614aee5ef | [
"MIT"
] | null | null | null | """
netvisor.requests.product
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013-2016 by Fast Monkeys Oy | 2019- by Heltti Oy
:license: MIT, see LICENSE for more details.
"""
from .base import Request
from ..exc import InvalidData
from ..responses.products import GetProductResponse, ProductListResponse
class GetProductRequest(Request):
method = 'GET'
uri = 'GetProduct.nv'
response_cls = GetProductResponse
def parse_response(self, response):
data = super(GetProductRequest, self).parse_response(response)
self.ensure_not_empty(data)
return data
def ensure_not_empty(self, data):
if data is None:
raise InvalidData(
'Data form incorrect:. '
'Product not found with Netvisor identifier: {0}'.format(
self.params['id']
)
)
class ProductListRequest(Request):
method = 'GET'
uri = 'ProductList.nv'
response_cls = ProductListResponse
| 27.189189 | 73 | 0.621272 | from .base import Request
from ..exc import InvalidData
from ..responses.products import GetProductResponse, ProductListResponse
class GetProductRequest(Request):
method = 'GET'
uri = 'GetProduct.nv'
response_cls = GetProductResponse
def parse_response(self, response):
data = super(GetProductRequest, self).parse_response(response)
self.ensure_not_empty(data)
return data
def ensure_not_empty(self, data):
if data is None:
raise InvalidData(
'Data form incorrect:. '
'Product not found with Netvisor identifier: {0}'.format(
self.params['id']
)
)
class ProductListRequest(Request):
method = 'GET'
uri = 'ProductList.nv'
response_cls = ProductListResponse
| true | true |
f72fad7595d798f28e7a2ab25421f74e4783d2ed | 4,409 | py | Python | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | tests/experimental/test_util.py | claytonlemons/python-pachyderm | 6e7f4c5c66486233390f4ba3f05a2056f1e21e9f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests of utility functions."""
import os
import json
import tempfile
import python_pachyderm
from python_pachyderm.experimental.service import pps_proto
from tests import util
# bp_to_pb: PfsInput -> PFSInput
# script that copies a file using just stdlibs
TEST_STDLIB_SOURCE = """
from shutil import copyfile
print("copying")
copyfile("/pfs/{}/file.dat", "/pfs/out/file.dat")
"""
# script that copies a file with padding and colorized output, using
# third-party libraries (defined in `TEST_REQUIREMENTS_SOURCE`.)
TEST_LIB_SOURCE = """
from termcolor import cprint
from leftpad import left_pad
cprint('copying', 'green')
with open('/pfs/{}/file.dat', 'r') as f:
contents = f.read()
with open('/pfs/out/file.dat', 'w') as f:
f.write(left_pad(contents, 5))
"""
TEST_REQUIREMENTS_SOURCE = """
# WCGW?
leftpad==0.1.2
termcolor==1.1.0
"""
TEST_PIPELINE_SPEC = """
{
"pipeline": {
"name": "foobar"
},
"description": "A pipeline that performs image edge detection by using the OpenCV library.",
"input": {
"pfs": {
"glob": "/*",
"repo": "images"
}
},
"transform": {
"cmd": [ "python3", "/edges.py" ],
"image": "pachyderm/opencv"
}
}
"""
def check_expected_files(client: python_pachyderm.Client, commit, expected):
for fi in client.walk_file(commit, "/"):
path = fi.file.path
assert path in expected, "unexpected path: {}".format(path)
expected.remove(path)
for path in expected:
assert False, "expected path not found: {}".format(path)
def test_put_files():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files")
with tempfile.TemporaryDirectory(suffix="python_pachyderm") as d:
# create a temporary directory with these files:
# 0.txt 1.txt 2.txt 3.txt 4.txt 0/0.txt 1/1.txt 2/2.txt
# 3/3.txt 4/4.txt
for i in range(5):
os.makedirs(os.path.join(d, str(i)))
for j in range(5):
with open(os.path.join(d, "{}.txt".format(j)), "w") as f:
f.write(str(j))
with open(os.path.join(d, str(j), "{}.txt".format(j)), "w") as f:
f.write(str(j))
# add the files under both `/` and `/sub` (the latter redundantly to
# test both for correct path handling and the ability to put files
# that already exist)
commit = (repo_name, "master")
python_pachyderm.put_files(client, d, commit, "/")
python_pachyderm.put_files(client, d, commit, "/sub")
python_pachyderm.put_files(client, d, commit, "/sub/")
expected = set(["/", "/sub/"])
for i in range(5):
expected.add("/{}/".format(i))
expected.add("/{}.txt".format(i))
expected.add("/{}/{}.txt".format(i, i))
expected.add("/sub/{}/".format(i))
expected.add("/sub/{}.txt".format(i))
expected.add("/sub/{}/{}.txt".format(i, i))
check_expected_files(client, commit, expected)
def test_put_files_single_file():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files_single_file")
with tempfile.NamedTemporaryFile() as f:
f.write(b"abcd")
f.flush()
commit = (repo_name, "master")
python_pachyderm.put_files(client, f.name, commit, "/f1.txt")
python_pachyderm.put_files(client, f.name, commit, "/f/f1")
expected = set(["/", "/f1.txt", "/f/", "/f/f1"])
check_expected_files(client, commit, expected)
def test_parse_json_pipeline_spec():
req = python_pachyderm.experimental.parse_json_pipeline_spec(TEST_PIPELINE_SPEC)
check_pipeline_spec(req)
def test_parse_dict_pipeline_spec():
req = python_pachyderm.experimental.parse_dict_pipeline_spec(
json.loads(TEST_PIPELINE_SPEC)
)
check_pipeline_spec(req)
def check_pipeline_spec(req):
assert req == pps_proto.CreatePipelineRequest(
pipeline=pps_proto.Pipeline(name="foobar"),
description="A pipeline that performs image edge detection by using the OpenCV library.",
input=pps_proto.Input(
pfs=pps_proto.PfsInput(glob="/*", repo="images"),
),
transform=pps_proto.Transform(
cmd=["python3", "/edges.py"],
image="pachyderm/opencv",
),
)
| 29.198675 | 97 | 0.635972 |
import os
import json
import tempfile
import python_pachyderm
from python_pachyderm.experimental.service import pps_proto
from tests import util
TEST_STDLIB_SOURCE = """
from shutil import copyfile
print("copying")
copyfile("/pfs/{}/file.dat", "/pfs/out/file.dat")
"""
TEST_LIB_SOURCE = """
from termcolor import cprint
from leftpad import left_pad
cprint('copying', 'green')
with open('/pfs/{}/file.dat', 'r') as f:
contents = f.read()
with open('/pfs/out/file.dat', 'w') as f:
f.write(left_pad(contents, 5))
"""
TEST_REQUIREMENTS_SOURCE = """
# WCGW?
leftpad==0.1.2
termcolor==1.1.0
"""
TEST_PIPELINE_SPEC = """
{
"pipeline": {
"name": "foobar"
},
"description": "A pipeline that performs image edge detection by using the OpenCV library.",
"input": {
"pfs": {
"glob": "/*",
"repo": "images"
}
},
"transform": {
"cmd": [ "python3", "/edges.py" ],
"image": "pachyderm/opencv"
}
}
"""
def check_expected_files(client: python_pachyderm.Client, commit, expected):
for fi in client.walk_file(commit, "/"):
path = fi.file.path
assert path in expected, "unexpected path: {}".format(path)
expected.remove(path)
for path in expected:
assert False, "expected path not found: {}".format(path)
def test_put_files():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files")
with tempfile.TemporaryDirectory(suffix="python_pachyderm") as d:
for i in range(5):
os.makedirs(os.path.join(d, str(i)))
for j in range(5):
with open(os.path.join(d, "{}.txt".format(j)), "w") as f:
f.write(str(j))
with open(os.path.join(d, str(j), "{}.txt".format(j)), "w") as f:
f.write(str(j))
commit = (repo_name, "master")
python_pachyderm.put_files(client, d, commit, "/")
python_pachyderm.put_files(client, d, commit, "/sub")
python_pachyderm.put_files(client, d, commit, "/sub/")
expected = set(["/", "/sub/"])
for i in range(5):
expected.add("/{}/".format(i))
expected.add("/{}.txt".format(i))
expected.add("/{}/{}.txt".format(i, i))
expected.add("/sub/{}/".format(i))
expected.add("/sub/{}.txt".format(i))
expected.add("/sub/{}/{}.txt".format(i, i))
check_expected_files(client, commit, expected)
def test_put_files_single_file():
client = python_pachyderm.experimental.Client()
client.delete_all()
repo_name = util.create_test_repo(client, "put_files_single_file")
with tempfile.NamedTemporaryFile() as f:
f.write(b"abcd")
f.flush()
commit = (repo_name, "master")
python_pachyderm.put_files(client, f.name, commit, "/f1.txt")
python_pachyderm.put_files(client, f.name, commit, "/f/f1")
expected = set(["/", "/f1.txt", "/f/", "/f/f1"])
check_expected_files(client, commit, expected)
def test_parse_json_pipeline_spec():
req = python_pachyderm.experimental.parse_json_pipeline_spec(TEST_PIPELINE_SPEC)
check_pipeline_spec(req)
def test_parse_dict_pipeline_spec():
req = python_pachyderm.experimental.parse_dict_pipeline_spec(
json.loads(TEST_PIPELINE_SPEC)
)
check_pipeline_spec(req)
def check_pipeline_spec(req):
assert req == pps_proto.CreatePipelineRequest(
pipeline=pps_proto.Pipeline(name="foobar"),
description="A pipeline that performs image edge detection by using the OpenCV library.",
input=pps_proto.Input(
pfs=pps_proto.PfsInput(glob="/*", repo="images"),
),
transform=pps_proto.Transform(
cmd=["python3", "/edges.py"],
image="pachyderm/opencv",
),
)
| true | true |
f72fad937cc7df0f5730a5937036f84b9f33c80d | 1,322 | py | Python | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | 2 | 2019-05-14T02:04:02.000Z | 2020-04-02T00:19:01.000Z | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | null | null | null | src/evaluator.py | ryoutoku/real-coded-genetic-algorithm | 4d0c72527e497e57e18c56d43e8c217a388ca86f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import math
from individual import Individual
class Evaluator(metaclass=ABCMeta):
def __init__(self):
Individual.set_evaluator(self)
def evaluate(self, individual):
"""個体を評価する
Args:
individual (individual): 評価する個体
Returns:
float: 評価値
"""
return self._evaluate_function(individual.gene)
@abstractmethod
def _evaluate_function(self, gene):
"""実際に個体を評価する実態
Args:
gene (np.array): 評価する遺伝子
"""
pass
class Sphere(Evaluator):
def _evaluate_function(self, gene):
"""Sphere関数として評価する
f(x_1...x_n) = x_1**2 + x_2**2 + ... x_n**2
Args:
gene (np.array): 評価する遺伝子
Returns:
float: 評価値
"""
return (gene**2).sum()
class Rosenbrock(Evaluator):
def _evaluate_function(self, gene):
"""Rosenbrock関数として評価する
f(x_1...x_n = 100(x_2 - x_1**2)**2 + (x_1 - 1)**2 + ...
Args:
gene (np.array): 評価する遺伝子
Returns:
float: 評価値
"""
result = 0.0
for gene_1, gene_2 in zip(gene, gene[1:]):
result += 100.0 * (gene_2 - gene_1**2) ** 2 + (gene_1 - 1)**2
return result
| 20.338462 | 73 | 0.53177 |
from abc import ABCMeta, abstractmethod
import math
from individual import Individual
class Evaluator(metaclass=ABCMeta):
def __init__(self):
Individual.set_evaluator(self)
def evaluate(self, individual):
return self._evaluate_function(individual.gene)
@abstractmethod
def _evaluate_function(self, gene):
pass
class Sphere(Evaluator):
def _evaluate_function(self, gene):
return (gene**2).sum()
class Rosenbrock(Evaluator):
def _evaluate_function(self, gene):
result = 0.0
for gene_1, gene_2 in zip(gene, gene[1:]):
result += 100.0 * (gene_2 - gene_1**2) ** 2 + (gene_1 - 1)**2
return result
| true | true |
f72fae73b6d958f9d94b097c0f0e32534effcad9 | 21,412 | py | Python | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | 15 | 2020-12-28T12:07:09.000Z | 2021-08-21T15:35:53.000Z | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | null | null | null | cogs/leveling.py | DevRohit06/Abode | 5864cdf3c74d51bea325d123e075a1becc541c91 | [
"MIT"
] | 8 | 2020-12-29T04:24:12.000Z | 2021-04-04T23:29:13.000Z | import discord
from discord.ext import commands, tasks
import datetime
import random
from prettytable import PrettyTable
import random
from random import randint
data = ['Water', 'Air', 'Earth', 'Fire', 'Destruction',
'Illusion', 'Time', 'Space', 'Karma', 'Chaos']
paths = random.choice(data)
luck = random.randint(1, 100)
data1 = ['Demon', 'Human', 'Dragon', 'Beast',
'Phoenix', 'Spirit', 'Giant', 'Fey']
color = 0xa100f2
guild = 757098499836739594
class vein8(commands.Cog, name='leveling'):
def __init__(self, Bot):
self.Bot = Bot
self.Bot.scholar_chat = self.Bot.get_channel(757108786497585172)
async def ModLog(self,ctx,commandname =None ,mod= None, target = None, amount :3 =None, Reason =None,
channel=None, content = None, jump = None):
guild = self.Bot.get_guild(self.Bot.guild_id)
log_channel= self.Bot.get_channel(759583119396700180)
embed = discord.Embed(color = random.choice(self.Bot.color_list),timestamp = datetime.datetime.utcnow())
embed.set_author(name=f"{commandname}",icon_url=ctx.author.avatar_url)
if mod !=None:
embed.add_field(name = "Mod", value = f"{mod.display_name} | {mod.mention}")
if target != None:
embed.add_field(name = "Target", value = f"{target.display_name} | {target.mention}")
if amount != None:
embed.add_field(name= "Amount", value= f'``{amount}``', inline=False)
if channel!= None:
embed.add_field(name = "On channel", value=f"{channel}")
if content!= None:
embed.add_field(name = "Content", value= f"```css\n{content}```", inline=False)
if jump != None:
embed.add_field(name = "Jump", value = f"[Here]({jump})")
if Reason !=None:
embed.add_field(name= "Reason ", value= f"```css\n{Reason}```", inline=False)
embed.set_thumbnail(url = guild.icon_url)
embed.set_footer(icon_url = mod.avatar_url)
await log_channel.send(embed=embed)
return self.ModLog
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message):
# remove the unnecessay things
if isinstance(message.channel, discord.channel.DMChannel):
return
if message.guild.id != 757098499836739594:
return
if message.author.id == 759784064361299989:
return
if message.author.bot:
return
if self.Bot.DEFAULT_PREFIX == '&':
return
race = random.choice(data1)
strength = random.randint(1, 10)
speed = random.randint(1, 10)
defense = random.randint(1, 10)
soul = random.randint(1, 10)
Hp = random.randint(50, 350)
#My server memers ain't lower than 50, that's for sure :)
wisdom = random.randint(50, 100)
bot1 = message.guild.get_channel(781535649843904562)
bot2 = message.guild.get_channel(757136943149613076)
music = message.guild.get_channel(768684108770574366)
testing = message.guild.get_channel(757941959796195484)
if message.channel.id == bot1.id:
return
if message.channel.id == (music.id) or message.channel.id == (testing.id):
return
author_id = str(message.author.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": author_id}
# checks if user is in the db or not
if (collection.find_one({"_id": author_id}) == None):
leauge = "Novice scholar"
Realm = "Mortal"
Path = paths
lol = "No aliases"
user_data = {"_id": author_id, "points": 1, "Leauge": leauge, "Qi": 0, "Daos": 0, "Path": Path, "Realm": Realm, "Luck": luck,
"Species": race, "Strength": strength, "Speed": speed, "Defense": defense, "Soul": soul, "Health": Hp, "Name": lol
, "Wisdom": wisdom}
collection.insert_one(user_data)
else:
query = {"_id": author_id}
level = collection.find(query)
for lvl in level:
cur_p = lvl['points']
new_p = cur_p + 1
# this is a mess
cur_q = lvl['Qi']
new_q = cur_q + 0.25
Leauge = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
if (new_q % 200) == 0:
await message.channel.send(f'<:Cuppedfist:757112296094040104> Congragulations! {message.author.mention}, your Qi just reached **{new_q}**.')
elif (new_q % 600) == 0:
await message.channel.send(f'{message.author}, you now have comprehendded ``{dao}`` heavenly dao(s).')
collection.update_one({"_id": author_id}, {
"$set": {"Daos": +1}})
if (new_q == 500):
ok = 'Star'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Star realm`` expert.\nAnd also, you earned the ``intermediate scholar`` medal.')
new_medal1 = 'Intermediate scholar'
collection.update_one({"_id": author_id}, {
"$set": {"Leauge": new_medal1}})
elif (new_q == 1500):
await message.channel.send(f'<:Cuppedfist:757112296094040104> {member.author.mention}, Congragulations you earned the ``Expert scholar`` medal.')
new_medal2 = 'Expert scholar'
collection.upate_one({"_id": author_id}, {
"$set": {"Leauge": new_medal2}})
elif (new_q % 10) == 0:
strength1 = random.randint(1, 15)
speed1 = random.randint(1, 10)
defense1 = random.randint(1, 25)
soul1 = random.randint(1, 5)
Hp1 = random.randint(1, 20)
collection.update_one({"_id": author_id}, {
"$set": {"Strength": stre + strength1}})
collection.update_one({"_id": author_id}, {
"$set": {"Speed": sped + speed1}})
collection.update_one({"_id": author_id}, {
"$set": {"Defense": defen + defense1}})
collection.update_one({"_id": author_id}, {
"$set": {"Soul": sol + soul1}})
collection.update_one({"_id": author_id}, {
"$set": {"Health": health + Hp1}})
if (new_q == 1100):
ok = 'Transcendent'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'{message.author.mention},<:Cuppedfist:757112296094040104> Congragulations! you just brokethrough to become a ``Transcendent realm`` expert.')
if (new_q == 2500):
ok = 'Saint'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Saint realm`` expert.')
if (new_q == 5100):
ok = 'God'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``God realm``expert.')
if (new_q == 10001):
ok = 'Chaotic'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Chaotic realm`` expert.')
collection.update_one({"_id": author_id}, {
"$set": {'points': new_p}})
collection.update_one({"_id": author_id}, {
"$set": {"Qi": new_q}})
@commands.command(aliases=['apoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def addpoints(self, ctx, member: discord.Member, amount, *, reason=None):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != (guild):
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) + int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully added ``{amount}`` points to {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Given!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
elif int(amount) >= 2000:
await ctx.send(f"<:WeirdChamp:757112297096216627> {ctx.author.name}, 2000 is the limit for now.")
@commands.command(aliases=['rpoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def removepoints(self, ctx, member: discord.Member, amount, *, Reason):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != 757098499836739594:
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if ctx.author.top_role < member.top_role:
return await ctx.send("You can't remove points of someone higher than you.")
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) - int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully removed ``{amount}`` points from {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Removed!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
else:
await ctx.send(f"{ctx.author.name}, you can't remove more than 2000 points. <:WeirdChamp:757112297096216627>")
@commands.command(aliases=["points", "qi", "p", 'stats'], description=f'Show your stats and general info.')
@commands.guild_only()
async def point(self, ctx):
if ctx.message.channel.id == 757108786497585172:
return
try:
member = ctx.author
member_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
nme = lvl['Name']
try:
wisdom = lvl['Wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__#{int(a) +1}/{total}__', value=f'**Aliases** :{nme} \n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}'
)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom : ** {str(wisdom)}')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
except:
await ctx.send(f'Your data probably isn\'nt saved on the database.')
@commands.command(aliases=["puser", "statsu"], description=f'Shows shats on another user, be sure to use the user id.')
@commands.guild_only()
async def pu(self, ctx, member_id: int):
if ctx.guild.id != (guild):
return
member = ctx.guild.get_member(member_id)
member_id = str(member_id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
try:
wisdom = lvl['wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__Main__', value=f'**Rank** : #{int(a) +1}/{total}\n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}', inline=False)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom :** {str(wisdom)} ')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
@commands.command(aliases=['aliases', 'cname'], description=f'Add your cultivator name.')
@commands.guild_only()
async def nickname(self, ctx, *, arg):
if len(arg) > 10:
return await ctx.send('Bruh you can\'t go over 10 characthers.')
if ctx.guild.id != (guild):
return
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = str(ctx.author.id)
name = str(arg)
name = str(arg)
collection.update_one({"_id": user_id}, {"$set": {"Name": name}})
await ctx.send(f'{ctx.author.mention} Your cultivator name was sucessfully set to {arg}.')
@commands.command(aliases=["lb"], description='Shows the top 10 cultivators on the server.')
@commands.guild_only()
async def leaderboard(self, ctx):
if ctx.channel.id == self.Bot.scholar_chat:
return
member = discord.Member or ctx.author
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
collection2 = db['Levels1']
users = collection.find().sort("Qi", -1).limit(10)
names = collection2.find().sort("Name", 1)
a2 = []
nme1 = []
name2 = []
pts1 = []
pth1 = []
table = PrettyTable()
table1 = PrettyTable()
a = 0
table.field_names = ["Rank", "Aliases", "Qi", "Points", "Path"]
table1.field_names = ["Rank", "Aliases", "Qi", "Points"]
table.align = "c"
for u in users:
user_id = u['_id']
qi = u['Qi']
pts = u['points']
pth = u['Path']
nme = u['Name']
a += 1
hm = str(pts)
hm1 = str(qi)
pts1.append(hm)
nme1.append(nme)
name2.append(hm1)
pth1.append(pth)
'''embed.add_field(name='Aliases', value=f"\n\n".join(nme1))
embed.add_field(name='Qi', value="\n\n".join(name2))
embed.add_field(name="Points", value=" \n\n ".join(pts1))
#embed.add_field(name=f"{a}", value=f'**Aliases : {nme}** \n**Qi : ** {qi}\n**Points : ** {pts} \n**Path : **{pth}')
embed.set_footer(text=f'To remove the \'None\' from your name, add your Cultivator name through .aliases')
await ctx.send(embed=embed)'''
table.add_row([a, f'{nme}', qi, pts, f'{pth}'])
table1.add_row([a, f'{nme}', qi, pts])
if ctx.author.is_on_mobile():
await ctx.send(f'```prolog\n{table}```')
else:
embed = discord.Embed(
title="Leaderboard \n``You can add your aliases by [.aliases <yourname>]``", color=color, description=f'```prolog\n{table1}```')
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_footer(text=f'Requested by {ctx.author.name}')
await ctx.send(embed=embed)
def setup(Bot):
Bot.add_cog(vein8(Bot))
print("Leveling cog is working.")
| 42.066798 | 240 | 0.500841 | import discord
from discord.ext import commands, tasks
import datetime
import random
from prettytable import PrettyTable
import random
from random import randint
data = ['Water', 'Air', 'Earth', 'Fire', 'Destruction',
'Illusion', 'Time', 'Space', 'Karma', 'Chaos']
paths = random.choice(data)
luck = random.randint(1, 100)
data1 = ['Demon', 'Human', 'Dragon', 'Beast',
'Phoenix', 'Spirit', 'Giant', 'Fey']
color = 0xa100f2
guild = 757098499836739594
class vein8(commands.Cog, name='leveling'):
def __init__(self, Bot):
self.Bot = Bot
self.Bot.scholar_chat = self.Bot.get_channel(757108786497585172)
async def ModLog(self,ctx,commandname =None ,mod= None, target = None, amount :3 =None, Reason =None,
channel=None, content = None, jump = None):
guild = self.Bot.get_guild(self.Bot.guild_id)
log_channel= self.Bot.get_channel(759583119396700180)
embed = discord.Embed(color = random.choice(self.Bot.color_list),timestamp = datetime.datetime.utcnow())
embed.set_author(name=f"{commandname}",icon_url=ctx.author.avatar_url)
if mod !=None:
embed.add_field(name = "Mod", value = f"{mod.display_name} | {mod.mention}")
if target != None:
embed.add_field(name = "Target", value = f"{target.display_name} | {target.mention}")
if amount != None:
embed.add_field(name= "Amount", value= f'``{amount}``', inline=False)
if channel!= None:
embed.add_field(name = "On channel", value=f"{channel}")
if content!= None:
embed.add_field(name = "Content", value= f"```css\n{content}```", inline=False)
if jump != None:
embed.add_field(name = "Jump", value = f"[Here]({jump})")
if Reason !=None:
embed.add_field(name= "Reason ", value= f"```css\n{Reason}```", inline=False)
embed.set_thumbnail(url = guild.icon_url)
embed.set_footer(icon_url = mod.avatar_url)
await log_channel.send(embed=embed)
return self.ModLog
@commands.Cog.listener()
@commands.guild_only()
async def on_message(self, message):
if isinstance(message.channel, discord.channel.DMChannel):
return
if message.guild.id != 757098499836739594:
return
if message.author.id == 759784064361299989:
return
if message.author.bot:
return
if self.Bot.DEFAULT_PREFIX == '&':
return
race = random.choice(data1)
strength = random.randint(1, 10)
speed = random.randint(1, 10)
defense = random.randint(1, 10)
soul = random.randint(1, 10)
Hp = random.randint(50, 350)
wisdom = random.randint(50, 100)
bot1 = message.guild.get_channel(781535649843904562)
bot2 = message.guild.get_channel(757136943149613076)
music = message.guild.get_channel(768684108770574366)
testing = message.guild.get_channel(757941959796195484)
if message.channel.id == bot1.id:
return
if message.channel.id == (music.id) or message.channel.id == (testing.id):
return
author_id = str(message.author.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": author_id}
if (collection.find_one({"_id": author_id}) == None):
leauge = "Novice scholar"
Realm = "Mortal"
Path = paths
lol = "No aliases"
user_data = {"_id": author_id, "points": 1, "Leauge": leauge, "Qi": 0, "Daos": 0, "Path": Path, "Realm": Realm, "Luck": luck,
"Species": race, "Strength": strength, "Speed": speed, "Defense": defense, "Soul": soul, "Health": Hp, "Name": lol
, "Wisdom": wisdom}
collection.insert_one(user_data)
else:
query = {"_id": author_id}
level = collection.find(query)
for lvl in level:
cur_p = lvl['points']
new_p = cur_p + 1
cur_q = lvl['Qi']
new_q = cur_q + 0.25
Leauge = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
if (new_q % 200) == 0:
await message.channel.send(f'<:Cuppedfist:757112296094040104> Congragulations! {message.author.mention}, your Qi just reached **{new_q}**.')
elif (new_q % 600) == 0:
await message.channel.send(f'{message.author}, you now have comprehendded ``{dao}`` heavenly dao(s).')
collection.update_one({"_id": author_id}, {
"$set": {"Daos": +1}})
if (new_q == 500):
ok = 'Star'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Star realm`` expert.\nAnd also, you earned the ``intermediate scholar`` medal.')
new_medal1 = 'Intermediate scholar'
collection.update_one({"_id": author_id}, {
"$set": {"Leauge": new_medal1}})
elif (new_q == 1500):
await message.channel.send(f'<:Cuppedfist:757112296094040104> {member.author.mention}, Congragulations you earned the ``Expert scholar`` medal.')
new_medal2 = 'Expert scholar'
collection.upate_one({"_id": author_id}, {
"$set": {"Leauge": new_medal2}})
elif (new_q % 10) == 0:
strength1 = random.randint(1, 15)
speed1 = random.randint(1, 10)
defense1 = random.randint(1, 25)
soul1 = random.randint(1, 5)
Hp1 = random.randint(1, 20)
collection.update_one({"_id": author_id}, {
"$set": {"Strength": stre + strength1}})
collection.update_one({"_id": author_id}, {
"$set": {"Speed": sped + speed1}})
collection.update_one({"_id": author_id}, {
"$set": {"Defense": defen + defense1}})
collection.update_one({"_id": author_id}, {
"$set": {"Soul": sol + soul1}})
collection.update_one({"_id": author_id}, {
"$set": {"Health": health + Hp1}})
if (new_q == 1100):
ok = 'Transcendent'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'{message.author.mention},<:Cuppedfist:757112296094040104> Congragulations! you just brokethrough to become a ``Transcendent realm`` expert.')
if (new_q == 2500):
ok = 'Saint'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Saint realm`` expert.')
if (new_q == 5100):
ok = 'God'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``God realm``expert.')
if (new_q == 10001):
ok = 'Chaotic'
collection.update_one({"_id": author_id}, {
"$set": {"Realm": ok}})
await message.channel.send(f'<:Cuppedfist:757112296094040104> {message.author.mention} Congragulations! you just brokethrough to become a ``Chaotic realm`` expert.')
collection.update_one({"_id": author_id}, {
"$set": {'points': new_p}})
collection.update_one({"_id": author_id}, {
"$set": {"Qi": new_q}})
@commands.command(aliases=['apoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def addpoints(self, ctx, member: discord.Member, amount, *, reason=None):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != (guild):
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) + int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully added ``{amount}`` points to {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Given!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
elif int(amount) >= 2000:
await ctx.send(f"<:WeirdChamp:757112297096216627> {ctx.author.name}, 2000 is the limit for now.")
@commands.command(aliases=['rpoints'], hidden=True)
@commands.guild_only()
@commands.has_permissions(manage_roles=True)
async def removepoints(self, ctx, member: discord.Member, amount, *, Reason):
channel = ctx.guild.get_channel(780785741101137926)
if ctx.guild.id != 757098499836739594:
return await ctx.send('<:WeirdChamp:757112297096216627> Come to the main server if you dare.')
if ctx.author.top_role < member.top_role:
return await ctx.send("You can't remove points of someone higher than you.")
if int(amount) <= 2000:
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = {"_id": memeber_id}
query = {"_id": memeber_id}
points = collection.find(query)
if collection.find_one({"_id": memeber_id} == None):
await ctx.send(f"{ctx.author.name}, No such user by the name {member.name} exists. ")
for point in points:
old_p = point['points']
amount_n = int(amount)
new_p = (int(old_p) - int(amount_n))
collection.update_one({"_id": memeber_id}, {
"$set": {"points": new_p}})
await ctx.send(f"Sucessfully removed ``{amount}`` points from {member.name}. Now {member.name} has ``{new_p}`` in total.")
await self.ModLog(ctx = ctx, mod= ctx.author, target=member, commandname="Points Removed!", channel=ctx.channel.mention
, amount = amount, jump= ctx.message.jump_url, Reason=reason)
else:
await ctx.send(f"{ctx.author.name}, you can't remove more than 2000 points. <:WeirdChamp:757112297096216627>")
@commands.command(aliases=["points", "qi", "p", 'stats'], description=f'Show your stats and general info.')
@commands.guild_only()
async def point(self, ctx):
if ctx.message.channel.id == 757108786497585172:
return
try:
member = ctx.author
member_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
nme = lvl['Name']
try:
wisdom = lvl['Wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__#{int(a) +1}/{total}__', value=f'**Aliases** :{nme} \n'
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}'
)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom : ** {str(wisdom)}')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
except:
await ctx.send(f'Your data probably isn\'nt saved on the database.')
@commands.command(aliases=["puser", "statsu"], description=f'Shows shats on another user, be sure to use the user id.')
@commands.guild_only()
async def pu(self, ctx, member_id: int):
if ctx.guild.id != (guild):
return
member = ctx.guild.get_member(member_id)
member_id = str(member_id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
qurey = {"_id": member_id}
users = collection.find(qurey)
total = collection.count()
hm = collection.find().sort("Qi", -1)
a = 0
for x in hm:
idd = x["_id"]
if idd == member_id:
break
else:
a += 1
for lvl in users:
_id = lvl['_id']
points = lvl['points']
medal = lvl['Leauge']
dao = lvl['Daos']
stre = lvl['Strength']
sped = lvl['Speed']
defen = lvl['Defense']
sol = lvl['Soul']
health = lvl['Health']
luk = lvl['Luck']
qi = lvl['Qi']
realm = lvl['Realm']
speci = lvl['Species']
pth = lvl['Path']
try:
wisdom = lvl['wisdom']
except:
wisdom = "Use .update to see your wisom"
embed = discord.Embed(
color=color, timestamp=datetime.datetime.utcnow())
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_author(name=f'{member.name} ',
icon_url=f'{member.avatar_url}')
embed.add_field(name=f'__Main__', value=f'**Rank** :
f'**Realm** : {str(realm)}\n'
f'**Species** : {str(speci)}')
embed.add_field(name="__Legacy__", value=f'**Path** : {str(pth)}\n'
f'**Medals** : {str(medal)}\n'
f'**Daos** : {str(dao)}')
embed.add_field(name='__Accomplishments__', value=f'**Qi : ** {str(qi)}\n'
f'**Points : ** {str(points)}\n'
f' **Luck : ** {str(luk)}', inline=False)
embed.add_field(name='__Stats__', value=f'**Strength :** {str(stre)}\n'
f'**Defense :** {str(defen)}\n'
f'**Speed** : {str(sped)}\n'
f'**Soul : **{str(sol)}\n'
f'**Health : ** {str(health)}\n'
f'**Wisdom :** {str(wisdom)} ')
embed.set_footer(text=f"Abode of Scholars")
await ctx.send(embed=embed)
@commands.command(aliases=['aliases', 'cname'], description=f'Add your cultivator name.')
@commands.guild_only()
async def nickname(self, ctx, *, arg):
if len(arg) > 10:
return await ctx.send('Bruh you can\'t go over 10 characthers.')
if ctx.guild.id != (guild):
return
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
user_id = str(ctx.author.id)
name = str(arg)
name = str(arg)
collection.update_one({"_id": user_id}, {"$set": {"Name": name}})
await ctx.send(f'{ctx.author.mention} Your cultivator name was sucessfully set to {arg}.')
@commands.command(aliases=["lb"], description='Shows the top 10 cultivators on the server.')
@commands.guild_only()
async def leaderboard(self, ctx):
if ctx.channel.id == self.Bot.scholar_chat:
return
member = discord.Member or ctx.author
memeber_id = str(member.id)
db = self.Bot.cluster1['AbodeDB']
collection = db['Levels']
collection2 = db['Levels1']
users = collection.find().sort("Qi", -1).limit(10)
names = collection2.find().sort("Name", 1)
a2 = []
nme1 = []
name2 = []
pts1 = []
pth1 = []
table = PrettyTable()
table1 = PrettyTable()
a = 0
table.field_names = ["Rank", "Aliases", "Qi", "Points", "Path"]
table1.field_names = ["Rank", "Aliases", "Qi", "Points"]
table.align = "c"
for u in users:
user_id = u['_id']
qi = u['Qi']
pts = u['points']
pth = u['Path']
nme = u['Name']
a += 1
hm = str(pts)
hm1 = str(qi)
pts1.append(hm)
nme1.append(nme)
name2.append(hm1)
pth1.append(pth)
table.add_row([a, f'{nme}', qi, pts, f'{pth}'])
table1.add_row([a, f'{nme}', qi, pts])
if ctx.author.is_on_mobile():
await ctx.send(f'```prolog\n{table}```')
else:
embed = discord.Embed(
title="Leaderboard \n``You can add your aliases by [.aliases <yourname>]``", color=color, description=f'```prolog\n{table1}```')
embed.set_thumbnail(url=f'{ctx.guild.icon_url}')
embed.set_footer(text=f'Requested by {ctx.author.name}')
await ctx.send(embed=embed)
def setup(Bot):
Bot.add_cog(vein8(Bot))
print("Leveling cog is working.")
| true | true |
f72fb037fc55f7ca2cf905ff81db0de58b739c7f | 363 | py | Python | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | trips-backend/tours/migrations/0003_auto_20210312_1921.py | pgarr/best-trips | edc45f6e822b70aa9bfa6f9d4ee8b2c58df54310 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-12 18:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tours', '0002_auto_20210308_2016'),
]
operations = [
migrations.RenameField(
model_name='reservation',
old_name='user',
new_name='owner',
),
]
| 19.105263 | 47 | 0.584022 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tours', '0002_auto_20210308_2016'),
]
operations = [
migrations.RenameField(
model_name='reservation',
old_name='user',
new_name='owner',
),
]
| true | true |
f72fb07aed75906e4fd55adb3081b3b4d85b4c83 | 108 | py | Python | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | runscript_DDmass.py | HarrisonWinch96/DarkDisk_Microlensing | e25d59051771318239116a8d2036aca8ce70236d | [
"BSD-3-Clause"
] | null | null | null | from plotgen_functions import DD_mass_constraints
import sys
q = float(sys.argv[1])
DD_mass_constraints(q) | 18 | 49 | 0.824074 | from plotgen_functions import DD_mass_constraints
import sys
q = float(sys.argv[1])
DD_mass_constraints(q) | true | true |
f72fb1455f5a6d221b2e71016d342136e0b43efe | 127 | py | Python | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | 3 | 2017-02-27T02:13:52.000Z | 2017-03-05T03:54:25.000Z | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | instance/config.py | ags68/CS4398Project | a9652a3670fdbfd2f482104b77573a2d1f138c69 | [
"Unlicense"
] | null | null | null | # instance/config.py
SECRET_KEY = 'p9Bv<3Eid9%$i01'
SQLALCHEMY_DATABASE_URI = 'mysql://esss_admin:esss2017@localhost/esss_db'
| 25.4 | 73 | 0.787402 |
SECRET_KEY = 'p9Bv<3Eid9%$i01'
SQLALCHEMY_DATABASE_URI = 'mysql://esss_admin:esss2017@localhost/esss_db'
| true | true |
f72fb15cca4227d9405d5ece1245eb1451ce43c4 | 134,835 | py | Python | Lib/test/test_ssl.py | harveyqing/read_cPython_source | 3ff91638c8c9df6e5ac8dd5235447b5571781535 | [
"PSF-2.0"
] | 36 | 2015-02-04T10:43:31.000Z | 2022-03-30T13:01:12.000Z | Lib/test/test_ssl.py | harveyqing/read_cPython_source | 3ff91638c8c9df6e5ac8dd5235447b5571781535 | [
"PSF-2.0"
] | 9 | 2015-03-17T05:56:16.000Z | 2021-11-17T09:31:50.000Z | src/test/python/cpython-3f944f44ee41/Lib/test/test_ssl.py | bkiers/python3-parser | 38496e94b3935e5abca6ab86fd98136c51f92fd7 | [
"MIT"
] | 22 | 2015-05-13T17:37:35.000Z | 2022-01-25T06:24:42.000Z | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
# Skip the whole test module cleanly when this build has no ssl support.
ssl = support.import_module("ssl")
# All PROTOCOL_* names known to the linked OpenSSL, in sorted order.
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST  # loopback host used by networked test helpers
def data_file(*name):
    """Return the path of a test-support file located next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

# Combined key + self-signed certificate in a single PEM file.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
# Same material, but split into separate cert and key files.
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
# Password-protected variants; KEY_PASSWORD unlocks both.
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
# CA directory in OpenSSL hashed-filename layout.
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")

SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")

# Deliberately broken inputs for error-path tests.
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")  # intentionally does not exist
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")  # NUL bytes in subjectAltName (CVE-2013-4238)

DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """When running verbosely, print the active exception preceded by *prefix*."""
    formatted = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + formatted)
def can_clear_options():
    """True when the linked OpenSSL (0.9.8m or newer) allows clearing context options."""
    minimum = (0, 9, 8, 13, 15)
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """True when disabling SSLv2 yields an SSLv3 hello (OpenSSL 0.9.7h or newer)."""
    minimum = (0, 9, 7, 8, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def have_verify_flags():
    """True when SSLContext.verify_flags is usable (OpenSSL 0.9.8 or newer)."""
    minimum = (0, 9, 8, 0, 15)
    return ssl.OPENSSL_VERSION_INFO >= minimum
def utc_offset():
    """Return the local timezone's offset from UTC in seconds.

    NOTE: ignore issues like #1647654 (local time = utc time + utc offset).
    """
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
def asn1time(cert_time):
    """Normalize an ASN.1 time string for OpenSSL builds that drop seconds.

    OpenSSL 0.9.8i ignores the seconds field (see issue #18207); on that
    exact version the expected string is rewritten with seconds zeroed.
    Other versions return *cert_time* unchanged.
    """
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        parsed = datetime.datetime.strptime(cert_time, fmt)
        cert_time = parsed.replace(second=0).strftime(fmt)
        # %d adds a leading zero but ASN1_TIME_print() uses a leading space
        if cert_time[4] == "0":
            cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    # Decorator: on builds that expose PROTOCOL_SSLv2, probe at call time
    # whether an SSLv2 context can actually be created.  On the patched
    # Debian/Ubuntu OpenSSL 0.9.8o the constructor raises SSLError, in which
    # case the wrapped test is skipped; any other SSLError falls through so
    # the wrapped test runs (and can fail) normally.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        # No SSLv2 support at all: nothing to probe, run the test as-is.
        return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
    """Tests of module-level ssl APIs that need no live network connection."""

    def test_constants(self):
        # Merely referencing the constants verifies they are exported.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})

    def test_str_for_enums(self):
        # Make sure that the PROTOCOL_* constants have enum-like string
        # reprs.
        proto = ssl.PROTOCOL_SSLv3
        self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv3')
        ctx = ssl.SSLContext(proto)
        self.assertIs(ctx.protocol, proto)

    def test_random(self):
        # Exercise the RAND_* PRNG helpers and their argument validation.
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))

        data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)

        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)

        self.assertRaises(TypeError, ssl.RAND_egd, 1)
        self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)

    @unittest.skipUnless(os.name == 'posix', 'requires posix')
    def test_random_fork(self):
        # After fork(), parent and child PRNG streams must diverge.
        status = ssl.RAND_status()
        if not status:
            self.fail("OpenSSL's PRNG has insufficient randomness")

        rfd, wfd = os.pipe()
        pid = os.fork()
        if pid == 0:
            try:
                os.close(rfd)
                child_random = ssl.RAND_pseudo_bytes(16)[0]
                self.assertEqual(len(child_random), 16)
                os.write(wfd, child_random)
                os.close(wfd)
            except BaseException:
                os._exit(1)
            else:
                os._exit(0)
        else:
            os.close(wfd)
            self.addCleanup(os.close, rfd)
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

            child_random = os.read(rfd, 16)
            self.assertEqual(len(child_random), 16)
            parent_random = ssl.RAND_pseudo_bytes(16)[0]
            self.assertEqual(len(parent_random), 16)

            self.assertNotEqual(child_random, parent_random)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Oct  5 23:01:56 2020 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Oct  8 23:01:56 2010 GMT'))
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                        )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))

    def test_parse_cert_CVE_2013_4238(self):
        # NUL bytes embedded in cert fields must survive parsing verbatim.
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))

        self.assertEqual(p['subjectAltName'], san)

    def test_DER_to_PEM(self):
        # Round-trip PEM -> DER -> PEM and check framing is preserved.
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 2.0
        self.assertLess(n, 0x20000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 2)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 26)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by OpenSSL, the format might change
        self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                        (s, t))

    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        with support.check_warnings(("", ResourceWarning)):
            del ss
        self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raise by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertRaises(OSError, ss.recv, 1)
            self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
            self.assertRaises(OSError, ss.recvfrom, 1)
            self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(OSError, ss.send, b'x')
            self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))

    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with ssl.wrap_socket(s) as ss:
                self.assertEqual(timeout, ss.gettimeout())

    def test_errors(self):
        # wrap_socket argument validation and file-not-found propagation.
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified",
                        ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                   s.connect, (HOST, 8080))
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)

    def test_match_hostname(self):
        # Exhaustive matrix of hostname-matching rules (RFC 6125 subset).
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')

        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')

        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')

        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')

        # wildcard doesn't match IDNA prefix 'xn--'
        idna = 'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)

        # wildcard in first fragment and IDNA A-labels in sequent fragments
        # are supported.
        idna = 'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
        ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythön.org'.encode("idna").decode("ascii"))

        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')

        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')

        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')

        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')

        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')

        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')

        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))

    def test_server_side(self):
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with socket.socket() as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")

    def test_unknown_channel_binding(self):
        # should raise ValueError for unknown type
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            with self.assertRaises(ValueError):
                ss.get_channel_binding("unknown-type")

    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))

    def test_dealloc_warn(self):
        # Dropping an open SSLSocket must emit a ResourceWarning naming it.
        ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
        r = repr(ss)
        with self.assertWarns(ResourceWarning) as cm:
            ss = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_get_default_verify_paths(self):
        # The SSL_CERT_DIR / SSL_CERT_FILE env vars must override defaults.
        paths = ssl.get_default_verify_paths()
        self.assertEqual(len(paths), 6)
        self.assertIsInstance(paths, ssl.DefaultVerifyPaths)

        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            paths = ssl.get_default_verify_paths()
            self.assertEqual(paths.cafile, CERTFILE)
            self.assertEqual(paths.capath, CAPATH)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))

        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")

        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                self.assertIsInstance(trust, (set, bool))
                if isinstance(trust, set):
                    trust_oids.update(trust)

        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_crls(self):
        self.assertTrue(ssl.enum_crls("CA"))
        self.assertRaises(TypeError, ssl.enum_crls)
        self.assertRaises(WindowsError, ssl.enum_crls, "")

        crls = ssl.enum_crls("CA")
        self.assertIsInstance(crls, list)
        for element in crls:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 2)
            self.assertIsInstance(element[0], bytes)
            self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})

    def test_asn1object(self):
        # _ASN1Object lookups by OID, NID, short name and long name must
        # all resolve to the same canonical tuple.
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')

        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))

        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                         expected)
        with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')

    def test_purpose_enum(self):
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
        self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
                              '1.3.6.1.5.5.7.3.1')

        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
        self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
                              '1.3.6.1.5.5.7.3.2')

    def test_unsupported_dtls(self):
        # Datagram sockets cannot be wrapped; both APIs must refuse them.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        with self.assertRaises(NotImplementedError) as cx:
            ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with self.assertRaises(NotImplementedError) as cx:
            ctx.wrap_socket(s)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")

    def cert_time_ok(self, timestring, timestamp):
        # Helper: *timestring* must parse to exactly *timestamp*.
        self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)

    def cert_time_fail(self, timestring):
        # Helper: *timestring* must be rejected with ValueError.
        with self.assertRaises(ValueError):
            ssl.cert_time_to_seconds(timestring)

    @unittest.skipUnless(utc_offset(),
                         'local time needs to be different from UTC')
    def test_cert_time_to_seconds_timezone(self):
        # Issue #19940: ssl.cert_time_to_seconds() returns wrong
        #               results if local timezone is not UTC
        self.cert_time_ok("May  9 00:00:00 2007 GMT", 1178668800.0)
        self.cert_time_ok("Jan  5 09:34:43 2018 GMT", 1515144883.0)

    def test_cert_time_to_seconds(self):
        timestring = "Jan  5 09:34:43 2018 GMT"
        ts = 1515144883.0
        self.cert_time_ok(timestring, ts)
        # accept keyword parameter, assert its name
        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
        # accept both %e and %d (space or zero generated by strftime)
        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
        # case-insensitive
        self.cert_time_ok("JaN  5 09:34:43 2018 GmT", ts)
        self.cert_time_fail("Jan  5 09:34 2018 GMT")     # no seconds
        self.cert_time_fail("Jan  5 09:34:43 2018")      # no GMT
        self.cert_time_fail("Jan  5 09:34:43 2018 UTC")  # not GMT timezone
        self.cert_time_fail("Jan 35 09:34:43 2018 GMT")  # invalid day
        self.cert_time_fail("Jon  5 09:34:43 2018 GMT")  # invalid month
        self.cert_time_fail("Jan  5 24:00:00 2018 GMT")  # invalid hour
        self.cert_time_fail("Jan  5 09:60:43 2018 GMT")  # invalid minute

        newyear_ts = 1230768000.0
        # leap seconds
        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
        # same timestamp
        self.cert_time_ok("Jan  1 00:00:00 2009 GMT", newyear_ts)

        self.cert_time_ok("Jan  5 09:34:59 2018 GMT", 1515144899)
        #  allow 60th second (even if it is not a leap second)
        self.cert_time_ok("Jan  5 09:34:60 2018 GMT", 1515144900)
        #  allow 2nd leap second for compatibility with time.strptime()
        self.cert_time_ok("Jan  5 09:34:61 2018 GMT", 1515144901)
        self.cert_time_fail("Jan  5 09:34:62 2018 GMT")  # invalid seconds

        # no special treatement for the special value:
        #   99991231235959Z (rfc 5280)
        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)

    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        # `cert_time_to_seconds()` should be locale independent

        def local_february_name():
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))

        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")

        # locale-independent
        self.cert_time_ok("Feb  9 00:00:00 2007 GMT", 1170979200.0)
        self.cert_time_fail(local_february_name() + "  9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        """Every known protocol constant builds a context; bad values raise."""
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)
    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        """The protocol attribute reflects the constant the context was built with."""
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)
    def test_ciphers(self):
        """set_ciphers accepts valid lists and rejects a match-nothing spec."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")
    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        """Options default to OP_ALL|OP_NO_SSLv2, OR-in new bits, and can only
        be cleared on OpenSSL >= 0.9.8m."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            # Older OpenSSL refuses to clear previously-set option bits.
            with self.assertRaises(ValueError):
                ctx.options = 0
    def test_verify_mode(self):
        """verify_mode defaults to CERT_NONE, accepts the CERT_* constants,
        and rejects None or out-of-range ints."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42
    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        """verify_flags round-trips the VERIFY_* constants and combinations."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value by OpenSSL
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None
    def test_load_cert_chain(self):
        """load_cert_chain: combined vs separate key/cert files, bad/mismatched
        input, password-protected keys, and every supported password-callback
        shape (str/bytes/bytearray return, callable object, bound method)."""
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
    """load_verify_locations() accepts str and bytes cafile paths and
    rejects missing arguments, nonexistent files and malformed certs."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_verify_locations(CERTFILE)
    ctx.load_verify_locations(cafile=CERTFILE, capath=None)
    ctx.load_verify_locations(BYTES_CERTFILE)
    ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
    # At least one location argument is required.
    self.assertRaises(TypeError, ctx.load_verify_locations)
    self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
    # A nonexistent path surfaces as OSError with ENOENT.
    with self.assertRaises(OSError) as cm:
        ctx.load_verify_locations(WRONGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    # A malformed certificate raises an SSLError from the PEM library.
    with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
        ctx.load_verify_locations(BADCERT)
    ctx.load_verify_locations(CERTFILE, CAPATH)
    ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)

    # Issue #10989: crash if the second argument type is invalid
    self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
    """load_verify_locations(cadata=...) accepts PEM strings and DER bytes
    and de-duplicates certificates already present in the store."""
    with open(CAFILE_CACERT) as f:
        cacert_pem = f.read()
    cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)

    with open(CAFILE_NEURONIO) as f:
        neuronio_pem = f.read()
    neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)

    # test PEM: the x509_ca counter grows by one per distinct CA cert
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
    ctx.load_verify_locations(cadata=cacert_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table
    ctx.load_verify_locations(cadata=neuronio_pem)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

    # combined
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    combined = "\n".join((cacert_pem, neuronio_pem))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

    # with junk around the certs
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                neuronio_pem, "tail"]
    ctx.load_verify_locations(cadata="\n".join(combined))
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

    # test DER
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_verify_locations(cadata=cacert_der)
    ctx.load_verify_locations(cadata=neuronio_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
    # cert already in hash table
    ctx.load_verify_locations(cadata=cacert_der)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

    # combined
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    combined = b"".join((cacert_der, neuronio_der))
    ctx.load_verify_locations(cadata=combined)
    self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

    # error cases: str cadata must be PEM, bytes cadata must be DER
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)

    with self.assertRaisesRegex(ssl.SSLError, "no start line"):
        ctx.load_verify_locations(cadata="broken")
    with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
        ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
    """load_dh_params() accepts a DH parameter file and rejects missing
    files and non-DH input."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_dh_params(DHFILE)
    if os.name != 'nt':
        # bytes paths are only exercised off Windows -- presumably a
        # filesystem-encoding limitation; confirm before relying on it.
        ctx.load_dh_params(BYTES_DHFILE)
    self.assertRaises(TypeError, ctx.load_dh_params)
    self.assertRaises(TypeError, ctx.load_dh_params, None)
    with self.assertRaises(FileNotFoundError) as cm:
        ctx.load_dh_params(WRONGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    # A certificate file is not valid DH parameters.
    with self.assertRaises(ssl.SSLError) as cm:
        ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
    """Every counter of a freshly created context starts at zero."""
    pristine_stats = {
        'number': 0,
        'connect': 0,
        'connect_good': 0,
        'connect_renegotiate': 0,
        'accept': 0,
        'accept_good': 0,
        'accept_renegotiate': 0,
        'hits': 0,
        'misses': 0,
        'timeouts': 0,
        'cache_full': 0,
    }
    for protocol in PROTOCOLS:
        self.assertEqual(ssl.SSLContext(protocol).session_stats(),
                         pristine_stats)
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
    """set_ecdh_curve() accepts a known curve as str or bytes and
    rejects missing or unknown curve names."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # Both spellings of a well-known curve are accepted.
    context.set_ecdh_curve("prime256v1")
    context.set_ecdh_curve(b"prime256v1")
    # Missing argument or None -> TypeError.
    self.assertRaises(TypeError, context.set_ecdh_curve)
    self.assertRaises(TypeError, context.set_ecdh_curve, None)
    # Unknown curve names -> ValueError, in either spelling.
    self.assertRaises(ValueError, context.set_ecdh_curve, "foo")
    self.assertRaises(ValueError, context.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
    """set_servername_callback() accepts only a callable or None."""
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # No argument at all is a TypeError...
    self.assertRaises(TypeError, context.set_servername_callback)
    # ...and so is any non-callable argument.
    for not_callable in (4, "", context):
        self.assertRaises(TypeError,
                          context.set_servername_callback, not_callable)

    def noop_callback(sock, servername, ctx):
        pass
    # Clearing (None) and installing a real callable both succeed.
    context.set_servername_callback(None)
    context.set_servername_callback(noop_callback)
@needs_sni
def test_sni_callback_refcycle(self):
    # Reference cycles through the servername callback are detected
    # and cleared.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # The default argument deliberately closes the cycle:
    # ctx -> callback -> ctx.
    def dummycallback(sock, servername, ctx, cycle=ctx):
        pass
    ctx.set_servername_callback(dummycallback)
    wr = weakref.ref(ctx)
    del ctx, dummycallback
    gc.collect()
    # The weakref must be dead: the collector broke the cycle.
    self.assertIs(wr(), None)
def test_cert_store_stats(self):
    """cert_store_stats() counts only certs added to the *verify* store;
    load_cert_chain() (own cert) must not affect it."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 0, 'crl': 0, 'x509': 0})
    # Loading our own certificate chain leaves the store untouched.
    ctx.load_cert_chain(CERTFILE)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 0, 'crl': 0, 'x509': 0})
    # A non-CA cert counts as x509 only...
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 0, 'crl': 0, 'x509': 1})
    # ...while a CA cert bumps both x509 and x509_ca.
    ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    self.assertEqual(ctx.cert_store_stats(),
                     {'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
    """get_ca_certs() lists only CA certificates, decoded (default) or
    as DER bytes (binary_form=True)."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.get_ca_certs(), [])
    # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.get_ca_certs(), [])
    # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
    ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    self.assertEqual(ctx.get_ca_certs(),
        [{'issuer': ((('organizationName', 'Root CA'),),
                     (('organizationalUnitName', 'http://www.cacert.org'),),
                     (('commonName', 'CA Cert Signing Authority'),),
                     (('emailAddress', 'support@cacert.org'),)),
          'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
          'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
          'serialNumber': '00',
          'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
          'subject': ((('organizationName', 'Root CA'),),
                      (('organizationalUnitName', 'http://www.cacert.org'),),
                      (('commonName', 'CA Cert Signing Authority'),),
                      (('emailAddress', 'support@cacert.org'),)),
          'version': 3}])

    # binary_form=True returns the same certificate as raw DER.
    with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
    """load_default_certs() accepts either Purpose constant (or none) and
    rejects anything else."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs()

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
    # Calling it twice on the same context must also be fine.
    ctx.load_default_certs()

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertRaises(TypeError, ctx.load_default_certs, None)
    # The purpose must be a Purpose constant, not its string name.
    self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
def test_create_default_context(self):
    """create_default_context() returns a hardened SSLv23 context:
    hostname checking + CERT_REQUIRED for server auth, CERT_NONE for
    client auth, SSLv2 and (if available) compression disabled."""
    ctx = ssl.create_default_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    # OP_NO_COMPRESSION may not exist on older OpenSSL; getattr(.., 0)
    # degrades the check to a no-op there.
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
        getattr(ssl, "OP_NO_COMPRESSION", 0),
    )

    # All three CA-loading channels are accepted together.
    with open(SIGNING_CA) as f:
        cadata = f.read()
    ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                     cadata=cadata)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
        getattr(ssl, "OP_NO_COMPRESSION", 0),
    )

    # Server-side purpose: no client-cert requirement by default, and
    # single-use (EC)DH keys where supported.
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
        getattr(ssl, "OP_NO_COMPRESSION", 0),
    )
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
        getattr(ssl, "OP_SINGLE_DH_USE", 0),
    )
    self.assertEqual(
        ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
    )
def test__create_stdlib_context(self):
    """_create_stdlib_context() defaults to a permissive context
    (CERT_NONE, no hostname check) but honours explicit overrides."""
    ctx = ssl._create_stdlib_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertFalse(ctx.check_hostname)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    # Explicit protocol selection is passed through unchanged.
    ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    # cert_reqs/check_hostname overrides take effect together.
    ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                     cert_reqs=ssl.CERT_REQUIRED,
                                     check_hostname=True)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    # purpose=CLIENT_AUTH (server-side use) is also permissive.
    ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
    """check_hostname and verify_mode are coupled: hostname checking
    needs CERT_REQUIRED/CERT_OPTIONAL and blocks switching to CERT_NONE."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertFalse(ctx.check_hostname)

    # Requires CERT_REQUIRED or CERT_OPTIONAL
    with self.assertRaises(ValueError):
        ctx.check_hostname = True
    ctx.verify_mode = ssl.CERT_REQUIRED
    # Raising verify_mode does not implicitly enable hostname checking.
    self.assertFalse(ctx.check_hostname)
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    ctx.verify_mode = ssl.CERT_OPTIONAL
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    # Cannot set CERT_NONE with check_hostname enabled
    with self.assertRaises(ValueError):
        ctx.verify_mode = ssl.CERT_NONE
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen(5)
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    """Client-side tests against live external hosts (mostly
    svn.python.org); each test is guarded by support.transient_internet()
    so it is skipped when the network resource is unavailable."""

    def test_connect(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # With CERT_NONE no peer cert is validated, so the
                # decoded cert dict is empty.
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # Absurdly small timeout to force the EAGAIN/EWOULDBLOCK
                # path; if the host answers anyway, skip.
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                # Port 444 is closed on the server.
                rc = s.connect_ex(("svn.python.org", 444))
                # Issue #19919: Windows machines or VMs hosted on Windows
                # machines sometimes return EWOULDBLOCK.
                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                # Without SNI support, server_hostname is rejected.
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # Verify server certificates passed directly via cadata,
        # both as PEM text and as DER bytes.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake manually on a non-blocking socket,
        # retrying via select() until it completes.
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    #should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
                s.connect(remote)
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            # Nothing loaded yet: capath is consulted lazily.
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet("svn.python.org"):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with ctx1.wrap_socket(s) as ss:
                ss.connect(("svn.python.org", 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """Threaded echo server used by the handshake tests.

    Accepts one connection at a time (each handled to completion by a
    ConnectionHandler thread), lower-cases whatever the client sends and
    echoes it back.  Understands a few control messages ('over',
    'STARTTLS', 'ENDTLS', 'CB tls-unique') so it can also exercise the
    STARTTLS and channel-binding code paths.
    """

    class ConnectionHandler(threading.Thread):
        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(1)
            # None until the connection is wrapped; read()/write()/close()
            # dispatch on it to pick the plain or SSL socket.
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            """Wrap the plain socket in SSL; returns False (and stops the
            server) on handshake failure."""
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
            except (ssl.SSLError, ConnectionResetError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server:  bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # Reads from the SSL layer once wrapped, otherwise the raw socket.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # NOTE: parameter name shadows the builtin `bytes`; kept for
            # byte-identical behaviour of the original code.
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            self.running = True
            if not self.server.starttls_server:
                # Plain (non-STARTTLS) mode: wrap immediately.
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        # unwrap() performs the SSL shutdown and hands back
                        # the underlying plain socket.
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        # Echo service: reply with the lower-cased message.
                        self.write(msg.lower())
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, ciphers=None, context=None):
        # Either take a ready-made context, or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        self.selected_protocols = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the server thread and wait until it is actually listening.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Short accept timeout so stop() is noticed promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server:  new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                # One connection at a time: wait for the handler to finish.
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    # this one's based on asyncore.dispatcher
    """Echo server driven by the asyncore event loop in a background
    thread; used to test SSL over non-blocking dispatchers."""

    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler (asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap immediately but defer the handshake so it can be
                # driven incrementally from the event loop.
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Drain data already buffered inside the SSL layer;
                    # select() would not report it as readable.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                """Advance the handshake one step; clears _ssl_accepting
                once it completes."""
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    # Not ready yet -- the event loop will call us again.
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server:  read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # Echo service: reply with the lower-cased data.
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server:  closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server:  new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # Start the event-loop thread and wait until it is running.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                # Short poll interval so stop() is noticed promptly.
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    The failure may surface as an ssl.SSLError (handshake rejected) or,
    for a nonexistent/unusable certfile, as an OSError; if the connection
    unexpectedly succeeds, AssertionError is raised.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            # Expected: the server rejected the bad client certificate.
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            # Also an acceptable failure mode (e.g. a missing certfile or
            # the peer resetting the connection).
            #
            # Note: the original code had a second, unreachable
            # `except OSError` clause here (dead code from the pre-3.3
            # IOError/OSError split, with a broken "\OSError" message);
            # it has been removed -- this handler already caught
            # every OSError.
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % x.args[1])
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a stats dict describing the negotiated connection
    (compression, cipher, peercert, NPN protocols on both sides).
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # Exercise every bytes-like flavour of the same payload.
            for payload in (indata, bytearray(indata), memoryview(indata)):
                if connectionchatty and support.verbose:
                    sys.stdout.write(
                        " client:  sending %r...\n" % indata)
                s.write(payload)
                echoed = s.read()
                if connectionchatty and support.verbose:
                    sys.stdout.write(" client:  read %r\n" % echoed)
                # The server echoes the lower-cased payload back.
                if echoed != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (echoed[:20], len(echoed),
                           indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if connectionchatty and support.verbose:
                sys.stdout.write(" client:  closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
        stats['server_npn_protocols'] = server.selected_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """Attempt a client/server handshake with the given protocol and
    option combination, asserting that it succeeds or fails as expected.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        formatstr = " %s->%s %s\n" if expect_success else " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")
    for sslctx in (client_context, server_context):
        sslctx.verify_mode = certsreqs
        sslctx.load_cert_chain(CERTFILE)
        sslctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as exc:
        if expect_success or exc.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
    """Client/server SSL tests that exercise a live, threaded echo server
    (`ThreadedEchoServer`) over a loopback connection."""
    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        for protocol in PROTOCOLS:
            with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
                context = ssl.SSLContext(protocol)
                context.load_cert_chain(CERTFILE)
                server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        # Check the peer-certificate dict returned after a completed
        # handshake (and that it raises before the handshake is done).
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            s = context.wrap_socket(socket.socket(),
                                    do_handshake_on_connect=False)
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            self.assertIn('notBefore', cert)
            self.assertIn('notAfter', cert)
            before = ssl.cert_time_to_seconds(cert['notBefore'])
            after = ssl.cert_time_to_seconds(cert['notAfter'])
            self.assertLess(before, after)
            s.close()
    @unittest.skipUnless(have_verify_flags(),
                        "verify_flags need OpenSSL > 0.9.8")
    def test_crl_check(self):
        # Exercise VERIFY_CRL_CHECK_LEAF: fails without a loaded CRL,
        # succeeds once the CA-signed CRL file is loaded.
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(SIGNING_CA)
        self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)
        # VERIFY_DEFAULT should pass
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
        context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaisesRegex(ssl.SSLError,
                                            "certificate verify failed"):
                    s.connect((HOST, server.port))
        # now load a CRL file. The CRL file is signed by the CA.
        context.load_verify_locations(CRLFILE)
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
    @needs_sni
    def test_check_hostname(self):
        # check_hostname=True: matching name verifies, a mismatch raises
        # CertificateError, and wrapping without server_hostname raises.
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)
        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="localhost") as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="invalid") as s:
                with self.assertRaisesRegex(ssl.CertificateError,
                                            "hostname 'invalid' doesn't match 'localhost'"):
                    s.connect((HOST, server.port))
        # missing server_hostname arg should cause an exception, too
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with socket.socket() as s:
                with self.assertRaisesRegex(ValueError,
                                            "check_hostname requires server_hostname"):
                    context.wrap_socket(s)
    def test_empty_cert(self):
        """Connecting with an empty cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "nullcert.pem"))
    def test_malformed_cert(self):
        """Connecting with a badly formatted certificate (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badcert.pem"))
    def test_nonexisting_cert(self):
        """Connecting with a non-existing cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "wrongcert.pem"))
    def test_malformed_key(self):
        """Connecting with a badly formatted key (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badkey.pem"))
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()
        s = socket.socket()
        port = support.bind_port(s, HOST)
        # `listener` runs in a thread. It sits in an accept() until
        # the main thread connects. Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()
        def connector():
            listener_ready.wait()
            with socket.socket() as c:
                c.connect((HOST, port))
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except OSError:
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')
        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            t.join()
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                         "OpenSSL is compiled without SSLv2 support")
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
        # SSLv23 client with specific SSL options
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                               client_options=ssl.OP_NO_SSLv2)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv23(self):
        """Connecting to an SSLv23 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try:
                try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
            except OSError as x:
                # this fails on some older versions of OpenSSL (0.9.7l, for instance)
                if support.verbose:
                    sys.stdout.write(
                        " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                        % str(x))
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        # Server with specific SSL options
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
        # Will choose TLSv1
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                           server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv3(self):
        """Connecting to an SSLv3 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv3)
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
        if no_sslv2_implies_sslv3_hello():
            # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
                               client_options=ssl.OP_NO_SSLv2)
    @skip_if_broken_ubuntu_ssl
    def test_protocol_tlsv1(self):
        """Connecting to a TLSv1 server with various client options"""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
                         "TLS version 1.1 not supported.")
    def test_protocol_tlsv1_1(self):
        """Connecting to a TLSv1.1 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_1)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
    @skip_if_broken_ubuntu_ssl
    @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
                         "TLS version 1.2 not supported.")
    def test_protocol_tlsv1_2(self):
        """Connecting to a TLSv1.2 server with various client options.
           Testing against older TLS versions."""
        if support.verbose:
            sys.stdout.write("\n")
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
                           server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                           client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_TLSv1_2)
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # `wrapped` tracks whether the connection is currently TLS; while
        # True, I/O goes through `conn` (the SSL socket) instead of `s`.
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
    def test_socketserver(self):
        """Using a SocketServer to create and manage SSL connections."""
        server = make_https_server(self, certfile=CERTFILE)
        # try to connect
        if support.verbose:
            sys.stdout.write('\n')
        with open(CERTFILE, 'rb') as f:
            d1 = f.read()
        # NOTE(review): d2 starts as str while d1 is bytes; the final
        # assertEqual relies on the HTTPS read below succeeding -- confirm.
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = 'https://%s:%d/%s' % (
            HOST, server.port, os.path.split(CERTFILE)[1])
        f = urllib.request.urlopen(url)
        try:
            dlen = f.info().get("content-length")
            if dlen and (int(dlen) > 0):
                d2 = f.read(int(dlen))
                if support.verbose:
                    sys.stdout.write(
                        " client: read %d bytes from remote server '%s'\n"
                        % (len(d2), server))
        finally:
            f.close()
        self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        indata = "TEST MESSAGE of mixed case\n"
        if support.verbose:
            sys.stdout.write("\n")
        # NOTE(review): this overwrites the str `indata` assigned above;
        # the first assignment is dead code.
        indata = b"FOO\n"
        server = AsyncoreEchoServer(CERTFILE)
        with server:
            s = ssl.wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client: read %r\n" % outdata)
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, whether to expect success, *args)
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"
            for meth_name, send_meth, expect_success, args in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    outdata = s.read()
                    if outdata != indata.lower():
                        # NOTE(review): "{outdata:r}" is not a valid format
                        # spec ("!r" was likely intended); this fail path
                        # would raise ValueError if reached -- confirm.
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, bytearray(100))
            s.write(b"over\n")
            s.close()
    def test_nonblocking_send(self):
        # Flooding a non-blocking SSL socket must eventually raise an
        # SSLWantWrite/SSLWantRead error instead of blocking.
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            s.setblocking(False)
            # If we keep sending data, at some point the buffers
            # will be full and the call will block
            buf = bytearray(8192)
            def fill_buffer():
                while True:
                    s.send(buf)
            self.assertRaises((ssl.SSLWantWriteError,
                               ssl.SSLWantReadError), fill_buffer)
            # Now read all the output and discard it
            s.setblocking(True)
            s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        finish = False
        def serve():
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        server = context.wrap_socket(server, server_side=True)
        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            nonlocal remote, peer
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.recv(1)
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
    def test_getpeercert_enotconn(self):
        # getpeercert() on an unconnected socket raises ENOTCONN.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with context.wrap_socket(socket.socket()) as sock:
            with self.assertRaises(OSError) as cm:
                sock.getpeercert()
            self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_do_handshake_enotconn(self):
        # do_handshake() on an unconnected socket raises ENOTCONN.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with context.wrap_socket(socket.socket()) as sock:
            with self.assertRaises(OSError) as cm:
                sock.do_handshake()
            self.assertEqual(cm.exception.errno, errno.ENOTCONN)
    def test_default_ciphers(self):
        # A client restricted to weak ciphers cannot handshake with a
        # default-configured server ("no shared cipher").
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        try:
            # Force a set of weak ciphers on our client context
            context.set_ciphers("DES")
        except ssl.SSLError:
            self.skipTest("no DES cipher available")
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                chatty=False) as server:
            with context.wrap_socket(socket.socket()) as s:
                with self.assertRaises(OSError):
                    s.connect((HOST, server.port))
        self.assertIn("no shared cipher", str(server.conn_errors[0]))
    @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
    def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.load_cert_chain(CERTFILE)
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias. Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
            context.set_ciphers("ECCdraft:ECDH")
        with ThreadedEchoServer(context=context) as server:
            with context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # get the data
            cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got channel binding data: {0!r}\n"
                                 .format(cb_data))
            # check if it is sane
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peers version
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(cb_data).encode("us-ascii"))
            s.close()
            # now, again
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            new_cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got another channel binding data: {0!r}\n"
                                 .format(new_cb_data))
            # is it really unique
            self.assertNotEqual(cb_data, new_cb_data)
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12) # True for TLSv1
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(new_cb_data).encode("us-ascii"))
            s.close()
    def test_compression(self):
        # Report the negotiated compression method (may be None).
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        if support.verbose:
            sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
        self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
    @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                         "ssl.OP_NO_COMPRESSION needed for this test")
    def test_compression_disabled(self):
        # OP_NO_COMPRESSION must force compression to None.
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        context.options |= ssl.OP_NO_COMPRESSION
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['compression'], None)
    def test_dh_params(self):
        # Check we can get a connection with ephemeral Diffie-Hellman
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        context.load_dh_params(DHFILE)
        context.set_ciphers("kEDH")
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        cipher = stats["cipher"][0]
        parts = cipher.split("-")
        if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
            self.fail("Non-DH cipher: " + cipher[0])
    def test_selected_npn_protocol(self):
        # selected_npn_protocol() is None unless NPN is used
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.load_cert_chain(CERTFILE)
        stats = server_params_test(context, context,
                                   chatty=True, connectionchatty=True)
        self.assertIs(stats['client_npn_protocol'], None)
    @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
    def test_npn_protocols(self):
        # Verify NPN negotiation picks the expected protocol for several
        # client preference lists against a fixed server list.
        server_protocols = ['http/1.1', 'spdy/2']
        protocol_tests = [
            (['http/1.1', 'spdy/2'], 'http/1.1'),
            (['spdy/2', 'http/1.1'], 'http/1.1'),
            (['spdy/2', 'test'], 'spdy/2'),
            (['abc', 'def'], 'abc')
        ]
        for client_protocols, expected in protocol_tests:
            server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            server_context.load_cert_chain(CERTFILE)
            server_context.set_npn_protocols(server_protocols)
            client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            client_context.load_cert_chain(CERTFILE)
            client_context.set_npn_protocols(client_protocols)
            stats = server_params_test(client_context, server_context,
                                       chatty=True, connectionchatty=True)
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                         str(expected))
            client_result = stats['client_npn_protocol']
            self.assertEqual(client_result, expected, msg % (client_result, "client"))
            server_result = stats['server_npn_protocols'][-1] \
                if len(stats['server_npn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected, msg % (server_result, "server"))
    def sni_contexts(self):
        # Helper: build (server, alternate-server, client) contexts for
        # the SNI callback tests below.
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        other_context.load_cert_chain(SIGNED_CERTFILE2)
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        client_context.verify_mode = ssl.CERT_REQUIRED
        client_context.load_verify_locations(SIGNING_CA)
        return server_context, other_context, client_context
    def check_common_name(self, stats, name):
        # Helper: assert the peer certificate's commonName equals `name`.
        cert = stats['peercert']
        self.assertIn((('commonName', name),), cert['subject'])
    @needs_sni
    def test_sni_callback(self):
        calls = []
        server_context, other_context, client_context = self.sni_contexts()
        def servername_cb(ssl_sock, server_name, initial_context):
            calls.append((server_name, initial_context))
            if server_name is not None:
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # CERTFILE4 was selected
        self.check_common_name(stats, 'fakehostname')
        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, 'localhost')
        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, 'localhost')
        self.assertEqual(calls, [])
    @needs_sni
    def test_sni_callback_alert(self):
        # Returning a TLS alert is reflected to the connecting client
        server_context, other_context, client_context = self.sni_contexts()
        def cb_returning_alert(ssl_sock, server_name, initial_context):
            return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
        server_context.set_servername_callback(cb_returning_alert)
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
    @needs_sni
    def test_sni_callback_raising(self):
        # Raising fails the connection with a TLS handshake failure alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_raising(ssl_sock, server_name, initial_context):
            1/0
        server_context.set_servername_callback(cb_raising)
        with self.assertRaises(ssl.SSLError) as cm, \
             support.captured_stderr() as stderr:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
        self.assertIn("ZeroDivisionError", stderr.getvalue())
    @needs_sni
    def test_sni_callback_wrong_return_type(self):
        # Returning the wrong return type terminates the TLS connection
        # with an internal error alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_wrong_return_type(ssl_sock, server_name, initial_context):
            return "foo"
        server_context.set_servername_callback(cb_wrong_return_type)
        with self.assertRaises(ssl.SSLError) as cm, \
             support.captured_stderr() as stderr:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
        self.assertIn("TypeError", stderr.getvalue())
    def test_read_write_after_close_raises_valuerror(self):
        # read()/write() on a closed SSL socket must raise ValueError.
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            s = context.wrap_socket(socket.socket())
            s.connect((HOST, server.port))
            s.close()
            self.assertRaises(ValueError, s.read, 1024)
            self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
    """Run the test suite: report platform/OpenSSL info when verbose,
    verify all certificate fixtures exist, then execute the test classes
    (with thread setup/teardown when threading is available)."""
    if support.verbose:
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        # Pick the first platform probe that yields a non-empty result.
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            # Not present on older OpenSSL builds.
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    # All certificate fixtures must be present before any test runs.
    for filename in [
        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [ContextTests, BasicSocketTests, SSLErrorTests]

    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)

    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)

    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
# Script entry point: run the suite when executed directly.
if __name__ == "__main__":
    test_main()
| 44.780804 | 117 | 0.553135 |
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
from unittest import mock
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
    """Return the path of a test-data file located next to this module.

    *name* components are joined onto the module's directory.
    """
    return os.path.join(os.path.dirname(__file__), *name)
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CRLFILE = data_file("revocation.crl")
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh512.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """Format the active exception and, in verbose runs, write it to stdout.

    *prefix* identifies which side (server/client thread) hit the error.
    """
    exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + exc_format)
def can_clear_options():
    """True if this OpenSSL build allows clearing already-set context options."""
    return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
    """True if, when SSLv2 is disabled, OpenSSL sends an SSLv3-style hello."""
    return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
    """True if SSLContext.verify_flags is available (needs OpenSSL > 0.9.8)."""
    return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def utc_offset():
    """Return the local timezone's offset from UTC in seconds (east positive).

    The original line had the `def` and `if` fused onto one line, which is
    a syntax error; structure restored here.
    """
    if time.daylight and time.localtime().tm_isdst > 0:
        return -time.altzone  # DST currently in effect
    return -time.timezone
def asn1time(cert_time):
    """Normalize an ASN.1 time string to what this OpenSSL build reports.

    OpenSSL 0.9.8i (API version (0, 9, 8, 9, 15)) drops the seconds and
    space-pads single-digit days, so expected values must be adjusted to
    match what _test_decode_cert returns on that build.  Other builds get
    the string back unchanged.
    """
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        parsed = datetime.datetime.strptime(cert_time, fmt)
        # Zero out the seconds field, then re-render in the same format.
        cert_time = parsed.replace(second=0).strftime(fmt)
        if cert_time[4] == "0":
            # Replace strftime's zero-padded day with a space-padded one.
            cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time
def skip_if_broken_ubuntu_ssl(func):
    """Decorator: skip *func* on Debian/Ubuntu builds whose patched OpenSSL
    rejects creating an SSLv2 context even though PROTOCOL_SSLv2 is exposed.

    The `def` line and the `if hasattr(...)` header were truncated in this
    copy (only `OL_SSLv2'):` survived); reconstructed from the upstream
    CPython source.
    """
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        # Interpreter has no SSLv2 support at all: nothing to work around.
        return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
proto = ssl.PROTOCOL_SSLv3
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv3')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 2.0
self.assertLess(n, 0x20000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 2)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by OpenSSL, the format might change
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
fail(cert, '<unsupported>')
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
0:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT")
self.cert_time_fail("Jan 5 09:34:43 2018")
self.cert_time_fail("Jan 5 09:34:43 2018 UTC")
self.cert_time_fail("Jan 35 09:34:43 2018 GMT")
self.cert_time_fail("Jon 5 09:34:43 2018 GMT")
self.cert_time_fail("Jan 5 24:00:00 2018 GMT")
self.cert_time_fail("Jan 5 09:60:43 2018 GMT")
newyear_ts = 1230768000.0
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT")
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
ctx.options)
ctx.options |= ssl.OP_NO_SSLv3
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value by OpenSSL
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
    """load_dh_params() accepts str paths (and bytes paths on POSIX) and
    rejects missing arguments, None, missing files and non-DH files."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_dh_params(DHFILE)
    if os.name != 'nt':
        # bytes paths are not supported on Windows
        ctx.load_dh_params(BYTES_DHFILE)
    for bad_args in ((), (None,)):
        self.assertRaises(TypeError, ctx.load_dh_params, *bad_args)
    with self.assertRaises(FileNotFoundError) as cm:
        ctx.load_dh_params(WRONGCERT)
    self.assertEqual(cm.exception.errno, errno.ENOENT)
    # a certificate file contains no DH parameters
    with self.assertRaises(ssl.SSLError) as cm:
        ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
    """A fresh context reports all-zero session statistics for every protocol."""
    zeroed = {
        'number': 0,
        'connect': 0,
        'connect_good': 0,
        'connect_renegotiate': 0,
        'accept': 0,
        'accept_good': 0,
        'accept_renegotiate': 0,
        'hits': 0,
        'misses': 0,
        'timeouts': 0,
        'cache_full': 0,
    }
    for proto in PROTOCOLS:
        self.assertEqual(ssl.SSLContext(proto).session_stats(), zeroed)
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
    """set_ecdh_curve() takes a str or bytes curve name; rejects missing
    arguments, None, and curves unknown to OpenSSL."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    for curve in ("prime256v1", b"prime256v1"):
        ctx.set_ecdh_curve(curve)
    self.assertRaises(TypeError, ctx.set_ecdh_curve)
    for bad, exc in ((None, TypeError), ("foo", ValueError),
                     (b"foo", ValueError)):
        self.assertRaises(exc, ctx.set_ecdh_curve, bad)
@needs_sni
def test_sni_callback(self):
    """set_servername_callback() accepts only a callable or None."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    # no argument at all, then several non-callables
    self.assertRaises(TypeError, ctx.set_servername_callback)
    for bad in (4, "", ctx):
        self.assertRaises(TypeError, ctx.set_servername_callback, bad)

    def dummycallback(sock, servername, ctx):
        pass

    # both valid forms: clearing and installing a callback
    ctx.set_servername_callback(None)
    ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
    """A reference cycle through the servername callback must not keep
    the context alive: the GC detects and clears it."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

    def dummycallback(sock, servername, ctx, cycle=ctx):
        # the 'cycle' default closes the ctx -> callback -> ctx loop
        pass

    ctx.set_servername_callback(dummycallback)
    wr = weakref.ref(ctx)
    del ctx, dummycallback
    gc.collect()
    self.assertIsNone(wr())
def test_cert_store_stats(self):
    """cert_store_stats() counts only verification certs, and tells CA
    certificates apart from leaf certificates."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

    def expect(x509_ca, crl, x509):
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': x509_ca, 'crl': crl, 'x509': x509})

    expect(0, 0, 0)
    # the context's own certificate chain does not enter the store
    ctx.load_cert_chain(CERTFILE)
    expect(0, 0, 0)
    # CERTFILE is a leaf certificate, not a CA
    ctx.load_verify_locations(CERTFILE)
    expect(0, 0, 1)
    # a genuine CA certificate bumps the CA counter as well
    ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    expect(1, 0, 2)
def test_get_ca_certs(self):
    """get_ca_certs() returns decoded CA certs (or DER bytes with
    binary_form=True); leaf certificates are not reported."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.get_ca_certs(), [])
    # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
    ctx.load_verify_locations(CERTFILE)
    self.assertEqual(ctx.get_ca_certs(), [])
    # but SVN_PYTHON_ORG_ROOT_CERT is a CA cert
    ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
    # full decoded form of the CACert root certificate
    self.assertEqual(ctx.get_ca_certs(),
        [{'issuer': ((('organizationName', 'Root CA'),),
                     (('organizationalUnitName', 'http://www.cacert.org'),),
                     (('commonName', 'CA Cert Signing Authority'),),
                     (('emailAddress', 'support@cacert.org'),)),
          'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
          'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
          'serialNumber': '00',
          'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
          'subject': ((('organizationName', 'Root CA'),),
                      (('organizationalUnitName', 'http://www.cacert.org'),),
                      (('commonName', 'CA Cert Signing Authority'),),
                      (('emailAddress', 'support@cacert.org'),)),
          'version': 3}])
    # binary_form=True returns the same certificate as raw DER
    with open(SVN_PYTHON_ORG_ROOT_CERT) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
    """load_default_certs() works with no argument, with either Purpose
    constant, and when called repeatedly; other arguments are rejected."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs()

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
    ctx.load_default_certs()  # loading twice is harmless

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)

    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    for bad_purpose in (None, 'SERVER_AUTH'):
        self.assertRaises(TypeError, ctx.load_default_certs, bad_purpose)
def test_create_default_context(self):
    """create_default_context() returns an SSLv23 context with sane,
    purpose-dependent defaults."""

    def check_options(ctx):
        # SSLv2 must always be disabled; compression too, when the
        # underlying OpenSSL supports the flag
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

    # default purpose (SERVER_AUTH): strict verification, hostname check on
    ctx = ssl.create_default_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    check_options(ctx)

    # CA material may be supplied via cafile, capath and cadata at once
    with open(SIGNING_CA) as f:
        cadata = f.read()
    ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                     cadata=cadata)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    check_options(ctx)

    # server-side purpose (CLIENT_AUTH): no client cert required, and
    # ephemeral (EC)DH keys must not be reused when supported
    ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    check_options(ctx)
    for flag_name in ("OP_SINGLE_DH_USE", "OP_SINGLE_ECDH_USE"):
        self.assertEqual(
            ctx.options & getattr(ssl, flag_name, 0),
            getattr(ssl, flag_name, 0),
        )
def test__create_stdlib_context(self):
    """_create_stdlib_context() defaults to unverified SSLv23 and honours
    the protocol, cert_reqs, check_hostname and purpose overrides."""

    def check_no_sslv2(ctx):
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    # defaults: SSLv23, no verification, no hostname checking
    ctx = ssl._create_stdlib_context()
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    self.assertFalse(ctx.check_hostname)
    check_no_sslv2(ctx)

    # explicit protocol
    ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    check_no_sslv2(ctx)

    # strict verification plus hostname checking
    ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                     cert_reqs=ssl.CERT_REQUIRED,
                                     check_hostname=True)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
    self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
    self.assertTrue(ctx.check_hostname)
    check_no_sslv2(ctx)

    # explicit purpose keeps the unverified defaults
    ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
    self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
    self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
    check_no_sslv2(ctx)
def test_check_hostname(self):
    """check_hostname requires CERT_OPTIONAL or CERT_REQUIRED, and while
    enabled it blocks switching verify_mode back to CERT_NONE."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    self.assertFalse(ctx.check_hostname)

    # cannot be enabled while verify_mode is CERT_NONE
    with self.assertRaises(ValueError):
        ctx.check_hostname = True

    # changing verify_mode alone never flips the flag implicitly
    ctx.verify_mode = ssl.CERT_REQUIRED
    self.assertFalse(ctx.check_hostname)
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    ctx.verify_mode = ssl.CERT_OPTIONAL
    ctx.check_hostname = True
    self.assertTrue(ctx.check_hostname)

    # while enabled, CERT_NONE is rejected...
    with self.assertRaises(ValueError):
        ctx.verify_mode = ssl.CERT_NONE
    # ...but disabling the flag itself is always allowed
    ctx.check_hostname = False
    self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
    """Tests for the SSLError hierarchy: errno/str behaviour, the
    library/reason introspection attributes, and subclass selection."""

    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # Same behaviour for subclasses
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)

    def test_lib_reason(self):
        # A certificate file holds no DH parameters, so this fails inside
        # OpenSSL's PEM library with reason NO_START_LINE.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        # str() embeds "[library: reason]" before the message
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)

    def test_subclass(self):
        # A non-blocking handshake with no peer data available must raise
        # the SSLWantReadError subclass (not a plain SSLError).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with socket.socket() as s:
            s.bind(("127.0.0.1", 0))
            s.listen(5)
            c = socket.socket()
            c.connect(s.getsockname())
            c.setblocking(False)
            with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # the legacy errno attribute is kept for compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class NetworkedTests(unittest.TestCase):
    """Tests that talk to real hosts over the network.

    Every test is guarded by support.transient_internet(), so the suite
    degrades to a skip when there is no connectivity.

    Fix: four lines in this class were corrupted/truncated (three
    ``with support.transient_internet("svn.python.org"):`` lines and one
    ``self.assertIn(rc, ...)``), leaving syntax errors; they have been
    restored.
    """

    def test_connect(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # without verification no cert data is exposed
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # connect_ex() returns 0 on success instead of raising
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # a non-blocking connect_ex() followed by a manual handshake loop
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # wait for the connect to finish, then handshake by hand
                select.select([], [s], [], 5.0)
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # on a timeout, connect_ex() should return the original errno
        # instead of raising, mimicking plain sockets
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                rc = s.connect_ex(("svn.python.org", 444))
                # connecting to a closed port must report a refusal
                # (EWOULDBLOCK is also seen on some platforms)
                self.assertIn(rc, (errno.ECONNREFUSED, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname; needs SNI support
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using a `capath` directory (which
        # requires hashed symlink names, both OpenSSL 0.9.8 and 1.0.0
        # flavours are present in CAPATH).
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # Verify server certificates using in-memory CA data, both as
        # PEM text and as DER bytes.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)
            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
                s.connect(("svn.python.org", 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # keep the original socket open once both have been closed.
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            # drive the handshake manually, selecting on the direction
            # each SSLWant* exception asks for
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        def _test_get_server_certificate(host, port, cert=None):
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                # verification against an unrelated cert must fail
                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    # should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                # verification against the right root must succeed
                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate('svn.python.org', 443, SVN_PYTHON_ORG_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
                s.connect(remote)
            with ssl.wrap_socket(socket.socket(socket.AF_INET),
                                 cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
                s.connect(remote)
            # a nonsensical cipher string must be rejected up front
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()

    def test_get_ca_certs_capath(self):
        # capath certs are loaded on request
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            self.assertEqual(ctx.get_ca_certs(), [])
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # the handshake pulled the CA cert into the store
            self.assertEqual(len(ctx.get_ca_certs()), 1)

    @needs_sni
    def test_context_setget(self):
        # Check that the context of a connected socket can be replaced.
        with support.transient_internet("svn.python.org"):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with ctx1.wrap_socket(s) as ss:
                ss.connect(("svn.python.org", 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """An echo server running in its own thread.

    Each accepted connection is serviced by a ConnectionHandler thread.
    The handler can operate over a plain or an SSL-wrapped socket so that
    the STARTTLS/ENDTLS dance can be exercised by the tests.
    """

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            self.sock.setblocking(1)
            # None while the connection is unencrypted
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Upgrade self.sock to SSL; on failure, record the error,
            # stop the server and return False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_protocols.append(self.sslconn.selected_npn_protocol())
            except (ssl.SSLError, ConnectionResetError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # read from whichever socket is currently active
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # write to whichever socket is currently active
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Echo loop with a small command protocol: 'over' ends the
            # connection, 'STARTTLS'/'ENDTLS' toggle encryption, and
            # 'CB tls-unique' returns channel-binding data; anything
            # else is echoed back lower-cased.
            self.running = True
            if not self.server.starttls_server:
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness we also want the server thread to exit
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, ciphers=None, context=None):
        # Either take a fully configured context, or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        # protocols negotiated / errors seen, inspected by the tests
        self.selected_protocols = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # start the thread and wait until it is ready to accept
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # accept loop; the short timeout lets stop() take effect quickly
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal readiness to the starting thread
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Echo server variant built on asyncore; the asyncore loop runs in
    its own thread."""

    class EchoServer(asyncore.dispatcher):

        class ConnectionHandler(asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap immediately, but complete the handshake lazily so
                # the dispatcher stays non-blocking.
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # drain data already buffered inside OpenSSL first
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                # Advance the handshake one step; retryable conditions
                # simply return and wait for the next event.
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        # echo back, lower-cased
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # start the thread and wait until the loop is running
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    An ssl.SSLError (bad cert rejected during handshake) or an OSError
    (e.g. ENOENT for a missing cert file) is the expected outcome; a
    successful connection is a test failure.

    Fix: the original had a second ``except OSError`` clause after the
    first one.  It was unreachable (the first clause already catches
    every OSError) and contained an invalid "\\O" string escape, so the
    dead clause has been removed; runtime behavior is unchanged.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except OSError as x:
            if support.verbose:
                sys.stdout.write("\nOSError is %s\n" % x.args[1])
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a stats dict with the negotiated 'compression', 'cipher',
    'peercert', 'client_npn_protocol' and 'server_npn_protocols'.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # send the payload as bytes, bytearray and memoryview; the
            # server echoes everything back lower-cased
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            # 'over' tells the server to close this connection
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_npn_protocol': s.selected_npn_protocol()
            })
            s.close()
        stats['server_npn_protocols'] = server.selected_protocols
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """Handshake a client/server pair using the given protocol versions,
    certificate requirements and context options, and assert that the
    outcome matches *expect_success*."""
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # braces mark combinations expected to fail
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")
    for proto in PROTOCOLS:
        proto_name = ssl._PROTOCOL_NAMES[proto]
        with self.subTest(protocol=proto_name):
            # same context on both ends; chatty for verbose diagnostics
            ctx = ssl.SSLContext(proto)
            ctx.load_cert_chain(CERTFILE)
            server_params_test(ctx, ctx, chatty=True, connectionchatty=True)
def test_getpeercert(self):
    """Handshake against our own echo server and inspect the peer cert."""
    if support.verbose:
        sys.stdout.write("\n")
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)
    with server:
        s = context.wrap_socket(socket.socket(),
                                do_handshake_on_connect=False)
        s.connect((HOST, server.port))
        # getpeercert() raises ValueError while the handshake isn't
        # done yet
        with self.assertRaises(ValueError):
            s.getpeercert()
        s.do_handshake()
        cert = s.getpeercert()
        self.assertTrue(cert, "Can't get peer certificate.")
        cipher = s.cipher()
        if support.verbose:
            sys.stdout.write(pprint.pformat(cert) + '\n')
            sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
        if 'subject' not in cert:
            self.fail("No subject field in certificate: %s." %
                      pprint.pformat(cert))
        if ((('organizationName', 'Python Software Foundation'),)
            not in cert['subject']):
            self.fail(
                "Missing or invalid 'organizationName' field in certificate subject; "
                "should be 'Python Software Foundation'.")
        self.assertIn('notBefore', cert)
        self.assertIn('notAfter', cert)
        # the validity interval must be well-ordered
        before = ssl.cert_time_to_seconds(cert['notBefore'])
        after = ssl.cert_time_to_seconds(cert['notAfter'])
        self.assertLess(before, after)
        s.close()
@unittest.skipUnless(have_verify_flags(),
                    "verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
    """VERIFY_CRL_CHECK_LEAF requires a loaded CRL for the peer cert."""
    if support.verbose:
        sys.stdout.write("\n")

    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)

    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(SIGNING_CA)
    self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT)

    # VERIFY_DEFAULT should pass
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")

    # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
    context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            with self.assertRaisesRegex(ssl.SSLError,
                                        "certificate verify failed"):
                s.connect((HOST, server.port))

    # now load a CRL file; the CRL file is signed by the CA
    context.load_verify_locations(CRLFILE)
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
    @needs_sni
    def test_check_hostname(self):
        """check_hostname=True: a matching server_hostname verifies, a
        mismatching one raises CertificateError, and omitting it entirely
        is rejected with ValueError."""
        if support.verbose:
            sys.stdout.write("\n")
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        server_context.load_cert_chain(SIGNED_CERTFILE)
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = ssl.CERT_REQUIRED
        context.check_hostname = True
        context.load_verify_locations(SIGNING_CA)
        # correct hostname should verify
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="localhost") as s:
                s.connect((HOST, server.port))
                cert = s.getpeercert()
                self.assertTrue(cert, "Can't get peer certificate.")
        # incorrect hostname should raise an exception
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with context.wrap_socket(socket.socket(),
                                     server_hostname="invalid") as s:
                with self.assertRaisesRegex(ssl.CertificateError,
                                            "hostname 'invalid' doesn't match 'localhost'"):
                    s.connect((HOST, server.port))
        # missing server_hostname arg should cause an exception, too
        server = ThreadedEchoServer(context=server_context, chatty=True)
        with server:
            with socket.socket() as s:
                with self.assertRaisesRegex(ValueError,
                                            "check_hostname requires server_hostname"):
                    context.wrap_socket(s)
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an OSError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()
        s = socket.socket()
        port = support.bind_port(s, HOST)
        # `listener` runs in a thread. It sits in an accept() until
        # the main thread connects. Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            newsock, addr = s.accept()
            newsock.close()
            s.close()
            listener_gone.set()
        def connector():
            listener_ready.wait()
            with socket.socket() as c:
                c.connect((HOST, port))
                # Wait until the server has torn the connection down
                # before attempting the TLS handshake on it.
                listener_gone.wait()
                try:
                    ssl_sock = ssl.wrap_socket(c)
                except OSError:
                    pass
                else:
                    self.fail('connecting to closed SSL socket should have failed')
        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            # Always reap the listener thread, even if connector() failed.
            t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
# Server with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, True)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, True,
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        # The scripted conversation: plain-text echoes, then STARTTLS
        # (upgrade to TLS), then ENDTLS (downgrade back to clear text).
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # ``wrapped`` tracks whether we are currently talking TLS (via
        # ``conn``) or plain text (via ``s``).
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    # unwrap() returns the underlying plain socket.
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Say goodbye over whichever transport is currently active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://%s:%d/%s' % (
HOST, server.port, os.path.split(CERTFILE)[1])
f = urllib.request.urlopen(url)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        # Wrap the *listening* socket; accept() must then hand back
        # connections that are themselves SSLSocket instances.
        server = context.wrap_socket(server, server_side=True)
        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            nonlocal remote, peer
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.recv(1)
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client_addr = client.getsockname()
        # Closing the client unblocks the server thread's recv(1).
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
    @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
    def test_npn_protocols(self):
        """For each (client protocol list, expected selection) pair, both
        sides of the handshake must agree on the same NPN protocol."""
        server_protocols = ['http/1.1', 'spdy/2']
        protocol_tests = [
            (['http/1.1', 'spdy/2'], 'http/1.1'),
            (['spdy/2', 'http/1.1'], 'http/1.1'),
            (['spdy/2', 'test'], 'spdy/2'),
            (['abc', 'def'], 'abc')
        ]
        for client_protocols, expected in protocol_tests:
            server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            server_context.load_cert_chain(CERTFILE)
            server_context.set_npn_protocols(server_protocols)
            client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            client_context.load_cert_chain(CERTFILE)
            client_context.set_npn_protocols(client_protocols)
            stats = server_params_test(client_context, server_context,
                                       chatty=True, connectionchatty=True)
            # The %% placeholders survive the first interpolation so the
            # message can later be completed per side (client/server).
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                         str(expected))
            client_result = stats['client_npn_protocol']
            self.assertEqual(client_result, expected, msg % (client_result, "client"))
            # The server records one entry per connection; use the last.
            server_result = stats['server_npn_protocols'][-1] \
                if len(stats['server_npn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
    @needs_sni
    def test_sni_callback(self):
        """The servername callback receives the SNI name, may swap in a
        different context, and can be disabled again with None."""
        calls = []
        server_context, other_context, client_context = self.sni_contexts()
        def servername_cb(ssl_sock, server_name, initial_context):
            calls.append((server_name, initial_context))
            if server_name is not None:
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 (via other_context) was selected
        self.check_common_name(stats, 'fakehostname')
        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, 'localhost')
        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, 'localhost')
        # ... and the disabled callback was never invoked.
        self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_main(verbose=False):
    """Entry point for regrtest: report build information, verify that all
    certificate fixture files exist, then run the selected test classes."""
    if support.verbose:
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        # Report the first platform probe that yields a non-empty result.
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
            (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print(" under %s" % plat)
        print(" HAS_SNI = %r" % ssl.HAS_SNI)
        print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass
    # Fail fast if any certificate fixture is missing from the test dir.
    for filename in [
        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)
    tests = [ContextTests, BasicSocketTests, SSLErrorTests]
    # Network-dependent and threaded suites are opt-in.
    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)
    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)
    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| true | true |
f72fb180675bc7af5ad554ef5a79f7262a0c0838 | 5,272 | py | Python | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | devel/lib/python2.7/dist-packages/learning_topic/msg/_Person.py | youxiangming/ROS_learning | 29a88ed40982c71d32d469498ad67fba5d9d0943 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from learning_topic/Person.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Person(genpy.Message):
_md5sum = "8cf74e85a44e7a35ab62353a46e326a3"
_type = "learning_topic/Person"
_has_header = False # flag to mark the presence of a Header object
_full_text = """string name
uint8 sex
uint8 age
uint8 unknown =0
uint8 male=1
uint8 female=2
uint16 age1
float64 height"""
# Pseudo-constants
unknown = 0
male = 1
female = 2
__slots__ = ['name','sex','age','age1','height']
_slot_types = ['string','uint8','uint8','uint16','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
name,sex,age,age1,height
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Person, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.name is None:
self.name = ''
if self.sex is None:
self.sex = 0
if self.age is None:
self.age = 0
if self.age1 is None:
self.age1 = 0
if self.height is None:
self.height = 0.
else:
self.name = ''
self.sex = 0
self.age = 0
self.age1 = 0
self.height = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
if python3:
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.name = str[start:end]
_x = self
start = end
end += 12
(_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # This message has no array fields, so the numpy module is unused and
    # the logic is identical to serialize().
    try:
      # Length-prefixed UTF-8 'name' string.
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      # Fixed-size fields: uint8 sex, uint8 age, uint16 age1, float64 height.
      _x = self
      buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # No array fields in this message, so numpy is unused; identical to
    # deserialize().
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      # 4-byte length prefix, then the 'name' bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.name = str[start:end]
      # Fixed-size tail: 12 bytes for sex, age, age1, height.
      _x = self
      start = end
      end += 12
      (_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared pre-compiled Struct for a little-endian uint32 (string length prefixes).
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return genpy's cached uint32 Struct."""
    global _struct_I
    return _struct_I
_struct_2BHd = None
def _get_struct_2BHd():
global _struct_2BHd
if _struct_2BHd is None:
_struct_2BHd = struct.Struct("<2BHd")
return _struct_2BHd
| 30.830409 | 145 | 0.628604 |
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Person(genpy.Message):
  """Auto-generated ROS message class for learning_topic/Person.

  Fields: name (string), sex (uint8), age (uint8), age1 (uint16),
  height (float64). The sex field uses the unknown/male/female constants.
  """
  _md5sum = "8cf74e85a44e7a35ab62353a46e326a3"
  _type = "learning_topic/Person"
  _has_header = False
  _full_text = """string name
uint8 sex
uint8 age
uint8 unknown =0
uint8 male=1
uint8 female=2
uint16 age1
float64 height"""
  # Message constants for the 'sex' field.
  unknown = 0
  male = 1
  female = 2
  __slots__ = ['name','sex','age','age1','height']
  _slot_types = ['string','uint8','uint8','uint16','float64']
  def __init__(self, *args, **kwds):
    """Initialize the message; field values may be positional or keyword."""
    if args or kwds:
      super(Person, self).__init__(*args, **kwds)
      # Message fields cannot be None: assign per-type defaults.
      if self.name is None:
        self.name = ''
      if self.sex is None:
        self.sex = 0
      if self.age is None:
        self.age = 0
      if self.age1 is None:
        self.age1 = 0
      if self.height is None:
        self.height = 0.
    else:
      self.name = ''
      self.sex = 0
      self.age = 0
      self.age1 = 0
      self.height = 0.
  def _get_types(self):
    """Internal genpy API: return the slot type strings."""
    return self._slot_types
  def serialize(self, buff):
    """Serialize into buff: length-prefixed UTF-8 name, then '<2BHd' fields."""
    try:
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """Populate this message from the serialized byte string *str*."""
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      # 4-byte length prefix, then 'name' bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.name = str[start:end]
      # Fixed-size tail: 12 bytes for sex, age, age1, height.
      _x = self
      start = end
      end += 12
      (_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """Serialize with numpy support; no array fields, same as serialize()."""
    try:
      _x = self.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
      _x = self
      buff.write(_get_struct_2BHd().pack(_x.sex, _x.age, _x.age1, _x.height))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """Deserialize with numpy support; no array fields, same as deserialize()."""
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.name = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.sex, _x.age, _x.age1, _x.height,) = _get_struct_2BHd().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)
# Shared pre-compiled Struct for a little-endian uint32 (string length prefixes).
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return genpy's cached uint32 Struct."""
    global _struct_I
    return _struct_I
_struct_2BHd = None
def _get_struct_2BHd():
global _struct_2BHd
if _struct_2BHd is None:
_struct_2BHd = struct.Struct("<2BHd")
return _struct_2BHd
| true | true |
f72fb182d47331d80baf9b9fb6534806b6d38c8b | 1,856 | py | Python | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | appengine/tictactoe/webhandler.py | salendron/ai-kindergarten | 02782730753b7d9b70fcff2dcfbff0f7a4ed6e0e | [
"MIT"
] | null | null | null | from bottle import SimpleTemplate
from bottle import request
from .game import Game
from .player import AIPlayer
from .recorder import save_game, get_stats, get_last_training
# Display names for the Game player codes:
# 'm' = machine player ("Bob"), 'h' = human player.
player_names = {
    'm': 'Bob',
    'h': 'You'
}
def render_field(idx, game):
    """Map the latest board state at cell *idx* to -1, 0 or 1 for the template."""
    cell = game.states[-1][idx]
    if cell in (-1, 0):
        return cell
    return 1
def index():
    """Handle one request of the tic-tac-toe page.

    Restores the game from the posted token, applies the human move (if any),
    lets the AI respond, persists finished games, and renders the board plus
    win/loss statistics.
    """
    token = request.forms.get('game_token')
    game = Game.from_token(token)
    winner = -1  # -1 means the game is still in progress
    move = request.forms.get('move')
    if move is not None:
        # Apply the human move; if the game continues, let the AI answer.
        game.play(int(move))
        winner = game.get_winner()
        if winner == -1:
            game.play(AIPlayer().get_move(game))
            winner = game.get_winner()
    if token is None and game.players[0] == 'm':
        # first round and bob has the first move
        game.play(AIPlayer().get_move(game))
    if winner != -1:
        # Game finished: record it for the statistics display / training data.
        save_game(game)
    # load human/machine/tie statistics for display
    stats_h, stats_m, stats_t = get_stats()
    tpl = SimpleTemplate(name="index.tpl", lookup=['./static/web/'])
    # NOTE(review): winner == 2 presumably denotes a draw (no winner name) -- confirm.
    return tpl.render(
        content='tictactoe',
        token=game.to_token(),
        winner=winner,
        player0=player_names[game.players[0]],
        player1=player_names[game.players[1]],
        winner_name=player_names[game.players[winner]] if winner != -1 and winner != 2 else "",
        stats_h=stats_h,
        stats_m=stats_m,
        stats_t=stats_t,
        last_train=get_last_training(),
        field0=render_field(0, game),
        field1=render_field(1, game),
        field2=render_field(2, game),
        field3=render_field(3, game),
        field4=render_field(4, game),
        field5=render_field(5, game),
        field6=render_field(6, game),
        field7=render_field(7, game),
        field8=render_field(8, game)
    )
| 25.081081 | 95 | 0.609375 | from bottle import SimpleTemplate
from bottle import request
from .game import Game
from .player import AIPlayer
from .recorder import save_game, get_stats, get_last_training
# Display names for the Game player codes ('m' = machine, 'h' = human).
player_names = {
    'm': 'Bob',
    'h': 'You'
}
def render_field(idx, game):
    """Return the display value (-1 empty, 0 first player, 1 second) for cell *idx*."""
    latest_board = game.states[-1]
    value = latest_board[idx]
    if value == -1:
        return -1
    return 0 if value == 0 else 1
def index():
    """Serve the tic-tac-toe page: apply moves, let the AI play, render stats."""
    token = request.forms.get('game_token')
    game = Game.from_token(token)
    winner = -1  # -1: game still running
    move = request.forms.get('move')
    if move is not None:
        # Human move first, then an AI reply if nobody has won yet.
        game.play(int(move))
        winner = game.get_winner()
        if winner == -1:
            game.play(AIPlayer().get_move(game))
            winner = game.get_winner()
    if token is None and game.players[0] == 'm':
        # First round with the machine as opening player.
        game.play(AIPlayer().get_move(game))
    if winner != -1:
        # Persist finished games for statistics / training.
        save_game(game)
    stats_h, stats_m, stats_t = get_stats()
    tpl = SimpleTemplate(name="index.tpl", lookup=['./static/web/'])
    # NOTE(review): winner == 2 presumably denotes a draw -- confirm.
    return tpl.render(
        content='tictactoe',
        token=game.to_token(),
        winner=winner,
        player0=player_names[game.players[0]],
        player1=player_names[game.players[1]],
        winner_name=player_names[game.players[winner]] if winner != -1 and winner != 2 else "",
        stats_h=stats_h,
        stats_m=stats_m,
        stats_t=stats_t,
        last_train=get_last_training(),
        field0=render_field(0, game),
        field1=render_field(1, game),
        field2=render_field(2, game),
        field3=render_field(3, game),
        field4=render_field(4, game),
        field5=render_field(5, game),
        field6=render_field(6, game),
        field7=render_field(7, game),
        field8=render_field(8, game)
    )
| true | true |
f72fb19d379b24571a1791dda2d07bd1524c5a49 | 1,313 | py | Python | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | 3 | 2022-01-07T19:37:03.000Z | 2022-03-15T08:50:28.000Z | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | Dynamic Obstacle Simulation/config.py | TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning | 717b9f07f4ed625ee33ab8ec22ce78dc2907d759 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 12:59:37 2021
@author: vxr131730
Author: Venkatraman Renganathan
Email: vrengana@utdallas.edu
Github: https://github.com/venkatramanrenganathan
- Create a configuration file for RRT*. Functions that use RRT* outputs
will use some of these configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import numpy as np
import pickle
# RRT* / simulation configuration constants.
maxIter = 100
Trial_Num = 1
Trial_Total = 100
num_states = 6
num_outputs = 4
total_dim = num_states + num_outputs

# Estimator choice: 1 -> UKF, 0 -> EKF.
estimatorSelector = 1

# True -> distributionally robust (DR) risk constraints,
# False -> chance constraints.
DRFlag = True

# Tracking horizon for the car.
carTrackHorizon = 4

# True -> dynamic (moving) obstacle simulation, False -> static obstacle.
dynamicObs = True

# Candidate obstacle velocities; velocitySelector picks one of them.
obsVelocities = [0.10, 0.20]
velocitySelector = 0

# Obstacle speed: taken from the list when dynamic, zero when static.
constantObsVelocity = obsVelocities[velocitySelector] if dynamicObs else 0
| 24.314815 | 80 | 0.677837 |
import numpy as np
import pickle
# Simulation configuration constants (see the module docstring for context).
maxIter = 100
Trial_Num = 1
Trial_Total = 100
num_states = 6
num_outputs = 4
total_dim = num_states + num_outputs
estimatorSelector = 1  # 1 -> UKF, 0 -> EKF
DRFlag = True          # True -> DR risk constraints, False -> chance constraints
carTrackHorizon = 4    # car tracking horizon
dynamicObs = True      # True -> moving obstacle, False -> static
obsVelocities = [0.10, 0.20]
velocitySelector = 0
# Obstacle speed: chosen from the candidate list when dynamic, zero otherwise.
constantObsVelocity = obsVelocities[velocitySelector] if dynamicObs else 0
| true | true |
f72fb1b068a952dc4984202737257c15d8acf40e | 2,692 | py | Python | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | koans/about_asserts.py | sayeda-khaled/python_koans | c834a95ece3cb1dd87b5c9923089c8c5b60b7154 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutAsserts(Koan):
    """Koans introducing the basic unittest assertion helpers (answers filled in)."""
    def test_assert_truth(self):
        """
        We shall contemplate truth by testing reality, via asserts.
        """
        # Confused? This video should help:
        #
        # http://bit.ly/about_asserts
        # self.assertTrue(False) # This should be True
        self.assertTrue(True) # This should be True
    def test_assert_with_message(self):
        """
        Enlightenment may be more easily achieved with appropriate messages.
        """
        # self.assertTrue(False, "This should be True -- Please fix this")
        self.assertTrue(True, "This should be True -- Please fix this")
    def test_fill_in_values(self):
        """
        Sometimes we will ask you to fill in the values
        """
        # self.assertEqual(__, 1 + 1)
        # https://www.geeksforgeeks.org/python-unittest-assertequal-function/
        self.assertEqual(2, 1 + 1) #asserting that the first argument equals the second one
    def test_assert_equality(self):
        """
        To understand reality, we must compare our expectations against reality.
        """
        expected_value = 2
        actual_value = 1 + 1
        self.assertTrue(expected_value == actual_value)
    def test_a_better_way_of_asserting_equality(self):
        """
        Some ways of asserting equality are better than others.
        """
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        expected_value = 2
        actual_value = 1 + 1
        self.assertEqual(expected_value, actual_value)
    def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
        """
        Understand what lies within.
        """
        # This throws an AssertionError exception
        # assert False
        # This one passes: a bare assert only raises when its expression is falsy
        assert True
    def test_that_sometimes_we_need_to_know_the_class_type(self):
        """
        What is in a class name?
        """
        # Sometimes we will ask you what the class type of an object is.
        #
        # For example, contemplate the text string "navel". What is its class type?
        # The koans runner will include this feedback for this koan:
        #
        # AssertionError: '-=> FILL ME IN! <=-' != <type 'str'>
        #
        # So "navel".__class__ is equal to <type 'str'>? No not quite. This
        # is just what it displays. The answer is simply str.
        #
        # See for yourself:
        self.assertEqual(str, "navel".__class__) # It's str, not <type 'str'>
        # Need an illustration? More reading can be found here:
        #
        # https://github.com/gregmalcolm/python_koans/wiki/Class-Attribute
| 31.302326 | 91 | 0.614413 |
from runner.koan import *
class AboutAsserts(Koan):
    """Koans introducing the basic unittest assertion helpers.

    Defect fixed: the body of test_assert_truth had been mangled to the bare
    token `rue(True)` (a corrupted `self.assertTrue(True)`), which is a
    NameError/SyntaxError at class definition time. Restored the full call.
    """
    def test_assert_truth(self):
        # We contemplate truth by testing reality, via asserts.
        self.assertTrue(True)
    def test_assert_with_message(self):
        # Assertion messages make failures easier to diagnose.
        self.assertTrue(True, "This should be True -- Please fix this")
    def test_fill_in_values(self):
        # assertEqual(expected, actual) checks the two arguments for equality.
        self.assertEqual(2, 1 + 1)
    def test_assert_equality(self):
        expected_value = 2
        actual_value = 1 + 1
        self.assertTrue(expected_value == actual_value)
    def test_a_better_way_of_asserting_equality(self):
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        expected_value = 2
        actual_value = 1 + 1
        self.assertEqual(expected_value, actual_value)
    def test_that_unittest_asserts_work_the_same_way_as_python_asserts(self):
        # A bare `assert` raises AssertionError only when its expression is falsy.
        assert True
    def test_that_sometimes_we_need_to_know_the_class_type(self):
        # "navel".__class__ is the built-in str type itself.
        self.assertEqual(str, "navel".__class__)
| true | true |
f72fb25142c4d22ce4847d1c4623c584a3ad02c2 | 1,326 | py | Python | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 1 | 2022-03-28T13:11:56.000Z | 2022-03-28T13:11:56.000Z | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 11 | 2021-03-17T17:53:58.000Z | 2021-07-17T17:59:25.000Z | aimsprop/pes.py | mtzgroup/aimsprop | 464d88ad7a817da73027fd2ab7b12476bf59f83d | [
"MIT"
] | 2 | 2021-04-05T08:36:35.000Z | 2021-05-20T22:12:12.000Z | import numpy as np
from .bundle import Bundle
def compute_pes(
    bundle: Bundle,
    carrier_frequency: float,
    alpha: float,
    eKT: np.ndarray,
) -> Bundle:
    """Compute a Gaussian-blurred photoelectron spectrum for each frame.

    The caller is responsible for having attached the per-frame properties
    "IP" (iterable of (state, ionization potential) pairs) and "dyson_norms"
    (2D array whose rows are (state, norm)) beforehand.

    Params:
        bundle: the Bundle object to compute the property for (modified
            in place)
        carrier_frequency: experimental probe pulse carrier frequency
            (hbar*omega)
        alpha: the Gaussian blurring exponent
        eKT: electron kinetic energy grid

    Return:
        bundle: reference to the input Bundle; every frame gains a "pes"
        property holding the computed spectrum on the eKT grid.
    """
    # The Gaussian normalization prefactor does not depend on the frame.
    prefactor = np.sqrt(alpha / np.pi)
    for frame in bundle.frames:
        ips = frame.properties["IP"]
        dyson_norms = frame.properties["dyson_norms"]
        spectrum = np.zeros_like(eKT)
        for state, ip in ips:
            # Look up the Dyson norm whose first column matches this state.
            norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]
            spectrum += norm * prefactor * np.exp(-alpha * (carrier_frequency - ip - eKT) ** 2)
        frame.properties["pes"] = spectrum
    return bundle
| 29.466667 | 86 | 0.610106 | import numpy as np
from .bundle import Bundle
def compute_pes(
bundle: Bundle,
carrier_frequency: float,
alpha: float,
eKT: np.ndarray,
) -> Bundle:
for frame in bundle.frames:
IPs = frame.properties["IP"]
dyson_norms = frame.properties["dyson_norms"]
pes = np.zeros_like(eKT)
for ind, (state, IP) in enumerate(IPs):
dyson_norm = dyson_norms[np.where(dyson_norms[:, 0] == state), 1][0]
pes += (
dyson_norm
* np.sqrt(alpha / np.pi)
* np.exp(-alpha * (carrier_frequency - IP - eKT) ** 2)
)
frame.properties["pes"] = pes
return bundle
| true | true |
f72fb25f8ffe85e2f2a3357c66821d11c44d4856 | 17,524 | py | Python | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 29 | 2016-12-15T07:14:44.000Z | 2022-03-17T17:24:08.000Z | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 5 | 2017-02-14T02:04:44.000Z | 2019-02-19T19:25:22.000Z | salesforce/xmltodict.py | iechenybsh/exiahuangl | 6834ec0d40a53ece3c0ff738c5b728616da26a02 | [
"Apache-2.0"
] | 12 | 2016-12-16T08:02:45.000Z | 2020-02-20T00:00:58.000Z | #!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
## https://github.com/martinblech/xmltodict
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised to abort streaming parsing when item_callback returns a falsy value."""
    pass
class _DictSAXHandler(object):
    """SAX-style handler that incrementally builds the dict representation
    of an XML document for :func:`parse`.

    The startElement/endElement/characters methods are wired up as expat
    callbacks; item_depth/item_callback implement the streaming mode
    described in :func:`parse`.
    """
    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True,
                 namespace_separator=':',
                 namespaces=None,
                 force_list=None):
        # path: (name, attrs) pairs from the root down to the current element.
        self.path = []
        # stack: saved (item, data) state of the enclosing elements.
        self.stack = []
        # data: character-data chunks collected for the current element.
        self.data = []
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        # xmlns declarations seen since the last element start.
        self.namespace_declarations = OrderedDict()
        self.force_list = force_list
    def _build_name(self, full_name):
        """Collapse an expat-expanded 'uri<sep>local' name via self.namespaces."""
        if not self.namespaces:
            return full_name
        i = full_name.rfind(self.namespace_separator)
        if i == -1:
            return full_name
        namespace, name = full_name[:i], full_name[i+1:]
        short_namespace = self.namespaces.get(namespace, namespace)
        if not short_namespace:
            # Namespace explicitly mapped to a falsy value: drop the prefix.
            return name
        else:
            return self.namespace_separator.join((short_namespace, name))
    def _attrs_to_dict(self, attrs):
        """Normalize expat's flat [k1, v1, k2, v2, ...] attribute list to a dict."""
        if isinstance(attrs, dict):
            return attrs
        return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
    def startNamespaceDecl(self, prefix, uri):
        # Collected declarations are attached to the next element as 'xmlns'.
        self.namespace_declarations[prefix or ''] = uri
    def startElement(self, full_name, attrs):
        """Expat StartElementHandler: push a new element onto the path/stack."""
        name = self._build_name(full_name)
        attrs = self._attrs_to_dict(attrs)
        if attrs and self.namespace_declarations:
            attrs['xmlns'] = self.namespace_declarations
            self.namespace_declarations = OrderedDict()
        self.path.append((name, attrs or None))
        if len(self.path) > self.item_depth:
            # Below the streaming depth: remember the parent's state.
            self.stack.append((self.item, self.data))
            if self.xml_attribs:
                # Turn attributes into '@'-prefixed entries (postprocessed
                # like regular values; falsy results are dropped).
                attr_entries = []
                for key, value in attrs.items():
                    key = self.attr_prefix+self._build_name(key)
                    if self.postprocessor:
                        entry = self.postprocessor(self.path, key, value)
                    else:
                        entry = (key, value)
                    if entry:
                        attr_entries.append(entry)
                attrs = self.dict_constructor(attr_entries)
            else:
                attrs = None
            self.item = attrs or None
            self.data = []
    def endElement(self, full_name):
        """Expat EndElementHandler: fold the finished element into its parent."""
        name = self._build_name(full_name)
        if len(self.path) == self.item_depth:
            # Streaming mode: hand the completed item to the callback.
            item = self.item
            if item is None:
                item = (None if not self.data
                        else self.cdata_separator.join(self.data))
            should_continue = self.item_callback(self.path, item)
            if not should_continue:
                raise ParsingInterrupted()
        if len(self.stack):
            # Join collected character data and restore the parent's state.
            data = (None if not self.data
                    else self.cdata_separator.join(self.data))
            item = self.item
            self.item, self.data = self.stack.pop()
            if self.strip_whitespace and data:
                data = data.strip() or None
            if data and self.force_cdata and item is None:
                # force_cdata: wrap bare text in a dict under cdata_key.
                item = self.dict_constructor()
            if item is not None:
                if data:
                    self.push_data(item, self.cdata_key, data)
                self.item = self.push_data(self.item, name, item)
            else:
                self.item = self.push_data(self.item, name, data)
        else:
            # Back at the root: reset the state.
            self.item = None
            self.data = []
        self.path.pop()
    def characters(self, data):
        """Expat CharacterDataHandler: accumulate text chunks."""
        if not self.data:
            self.data = [data]
        else:
            self.data.append(data)
    def push_data(self, item, key, data):
        """Store *data* under *key* in *item*, promoting repeats to lists."""
        if self.postprocessor is not None:
            result = self.postprocessor(self.path, key, data)
            if result is None:
                # Postprocessor vetoed this entry.
                return item
            key, data = result
        if item is None:
            item = self.dict_constructor()
        try:
            value = item[key]
            # Key already present: collect repeated children into a list.
            if isinstance(value, list):
                value.append(data)
            else:
                item[key] = [value, data]
        except KeyError:
            if self._should_force_list(key, data):
                item[key] = [data]
            else:
                item[key] = data
        return item
    def _should_force_list(self, key, value):
        """Decide whether a first occurrence of *key* must be stored as a list."""
        if not self.force_list:
            return False
        try:
            return key in self.force_list
        except TypeError:
            # force_list may also be a callable(path, key, value).
            return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
          namespace_separator=':', disable_entities=True, **kwargs):
    """Parse the given XML input and convert it into a dictionary.

    `xml_input` can either be a `string` or a file-like object.

    If `xml_attribs` is `True`, element attributes are put in the dictionary
    among regular child elements, using `@` as a prefix to avoid collisions. If
    set to `False`, they are just ignored.

    Simple example::

        >>> import xmltodict
        >>> doc = xmltodict.parse(\"\"\"
        ... <a prop="x">
        ...   <b>1</b>
        ...   <b>2</b>
        ... </a>
        ... \"\"\")
        >>> doc['a']['@prop']
        u'x'
        >>> doc['a']['b']
        [u'1', u'2']

    If `item_depth` is `0`, the function returns a dictionary for the root
    element (default behavior). Otherwise, it calls `item_callback` every time
    an item at the specified depth is found and returns `None` in the end
    (streaming mode).

    The callback function receives two parameters: the `path` from the document
    root to the item (name-attribs pairs), and the `item` (dict). If the
    callback's return value is false-ish, parsing will be stopped with the
    :class:`ParsingInterrupted` exception.

    Streaming example::

        >>> def handle(path, item):
        ...     print('path:%s item:%s' % (path, item))
        ...     return True
        ...
        >>> xmltodict.parse(\"\"\"
        ... <a prop="x">
        ...   <b>1</b>
        ...   <b>2</b>
        ... </a>\"\"\", item_depth=2, item_callback=handle)
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
        path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2

    The optional argument `postprocessor` is a function that takes `path`,
    `key` and `value` as positional arguments and returns a new `(key, value)`
    pair where both `key` and `value` may have changed. Usage example::

        >>> def postprocessor(path, key, value):
        ...     try:
        ...         return key + ':int', int(value)
        ...     except (ValueError, TypeError):
        ...         return key, value
        >>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
        ...                 postprocessor=postprocessor)
        OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])

    You can pass an alternate version of `expat` (such as `defusedexpat`) by
    using the `expat` parameter. E.g:

        >>> import defusedexpat
        >>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
        OrderedDict([(u'a', u'hello')])

    You can use the force_list argument to force lists to be created even
    when there is only a single child of a given level of hierarchy. The
    force_list argument is a tuple of keys. If the key for a given level
    of hierarchy is in the force_list argument, that level of hierarchy
    will have a list as a child (even if there is only one sub-element).
    The index_keys operation takes precendence over this. This is applied
    after any user-supplied postprocessor has already run.

    For example, given this input:
    <servers>
      <server>
        <name>host1</name>
        <os>Linux</os>
        <interfaces>
          <interface>
            <name>em0</name>
            <ip_address>10.0.0.1</ip_address>
          </interface>
        </interfaces>
      </server>
    </servers>

    If called with force_list=('interface',), it will produce
    this dictionary:
    {'servers':
      {'server':
        {'name': 'host1',
         'os': 'Linux'},
         'interfaces':
          {'interface':
            [ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } }

    `force_list` can also be a callable that receives `path`, `key` and
    `value`. This is helpful in cases where the logic that decides whether
    a list should be forced is more complex.
    """
    handler = _DictSAXHandler(namespace_separator=namespace_separator,
                              **kwargs)
    if isinstance(xml_input, _unicode):
        # expat wants bytes: encode unicode input up front (default UTF-8).
        if not encoding:
            encoding = 'utf-8'
        xml_input = xml_input.encode(encoding)
    if not process_namespaces:
        namespace_separator = None
    parser = expat.ParserCreate(
        encoding,
        namespace_separator
    )
    try:
        parser.ordered_attributes = True
    except AttributeError:
        # Jython's expat does not support ordered_attributes
        pass
    parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
    parser.StartElementHandler = handler.startElement
    parser.EndElementHandler = handler.endElement
    parser.CharacterDataHandler = handler.characters
    parser.buffer_text = True
    if disable_entities:
        # Mitigate entity-expansion ("billion laughs") and external-entity
        # attacks by disabling DTD processing / entity expansion.
        try:
            # Attempt to disable DTD in Jython's expat parser (Xerces-J).
            feature = "http://apache.org/xml/features/disallow-doctype-decl"
            parser._reader.setFeature(feature, True)
        except AttributeError:
            # For CPython / expat parser.
            # Anything not handled ends up here and entities aren't expanded.
            parser.DefaultHandler = lambda x: None
            # Expects an integer return; zero means failure -> expat.ExpatError.
            parser.ExternalEntityRefHandler = lambda *x: 1
    if hasattr(xml_input, 'read'):
        # File-like input is streamed by expat.
        parser.ParseFile(xml_input)
    else:
        parser.Parse(xml_input, True)
    return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          depth=0,
          preprocessor=None,
          pretty=False,
          newl='\n',
          indent='\t',
          namespace_separator=':',
          namespaces=None,
          full_document=True):
    """Recursively emit SAX events for one (key, value) pair onto
    *content_handler*; the workhorse behind :func:`unparse`."""
    key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            # Preprocessor vetoed this node entirely.
            return
        key, value = result
    # Normalize scalars/dicts to a list so repeated siblings share one path.
    if (not hasattr(value, '__iter__')
            or isinstance(value, _basestring)
            or isinstance(value, dict)):
        value = [value]
    for index, v in enumerate(value):
        if full_document and depth == 0 and index > 0:
            raise ValueError('document with multiple roots')
        # Coerce each item into a dict: None -> empty element,
        # bool -> 'true'/'false' text, other scalars -> text.
        if v is None:
            v = OrderedDict()
        elif isinstance(v, bool):
            if v:
                v = _unicode('true')
            else:
                v = _unicode('false')
        elif not isinstance(v, dict):
            v = _unicode(v)
        if isinstance(v, _basestring):
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        # Split the dict into character data, attributes, and child elements.
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                ik = _process_namespace(ik, namespaces, namespace_separator,
                                        attr_prefix)
                if ik == '@xmlns' and isinstance(iv, dict):
                    # Expand an 'xmlns' dict into xmlns / xmlns:prefix attrs.
                    for k, v in iv.items():
                        attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
                        attrs[attr] = _unicode(v)
                    continue
                if not isinstance(iv, _unicode):
                    iv = _unicode(iv)
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        if pretty:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.startElement(key, AttributesImpl(attrs))
        if pretty and children:
            content_handler.ignorableWhitespace(newl)
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, depth+1, preprocessor,
                  pretty, newl, indent, namespaces=namespaces,
                  namespace_separator=namespace_separator)
        if cdata is not None:
            content_handler.characters(cdata)
        if pretty and children:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.endElement(key)
        if pretty and depth:
            content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
            short_empty_elements=False,
            **kwargs):
    """Emit an XML document for the given `input_dict` (reverse of `parse`).

    The resulting XML document is returned as a string, but if `output` (a
    file-like object) is specified, it is written there instead.

    Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
    as XML node attributes, whereas keys equal to `cdata_key`
    (default=`'#text'`) are treated as character data.

    The `pretty` parameter (default=`False`) enables pretty-printing. In this
    mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
    can be customized with the `newl` and `indent` parameters.

    """
    if full_document and len(input_dict) != 1:
        raise ValueError('Document must have exactly one root.')
    must_return = False
    if output is None:
        # No output stream given: collect into a StringIO and return it.
        output = StringIO()
        must_return = True
    if short_empty_elements:
        content_handler = XMLGenerator(output, encoding, True)
    else:
        content_handler = XMLGenerator(output, encoding)
    if full_document:
        content_handler.startDocument()
    for key, value in input_dict.items():
        _emit(key, value, content_handler, full_document=full_document,
              **kwargs)
    if full_document:
        content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try:  # pragma no cover
            # Python 2 StringIO may hand back bytes; normalize to text.
            value = value.decode(encoding)
        except AttributeError:  # pragma no cover
            pass
        return value
if __name__ == '__main__':  # pragma: no cover
    # CLI helper: stream-parse XML from stdin at the depth given by argv[1]
    # and marshal each (path, item) pair to stdout.
    # (Fixed: the final `pass` line had extraneous text fused onto it,
    # which made the module unparseable.)
    import sys
    import marshal
    try:
        # Prefer the binary buffers on Python 3; fall back on Python 2.
        stdin = sys.stdin.buffer
        stdout = sys.stdout.buffer
    except AttributeError:
        stdin = sys.stdin
        stdout = sys.stdout

    # Exactly one argument is expected: the streaming item depth.
    (item_depth,) = sys.argv[1:]
    item_depth = int(item_depth)

    def handle_item(path, item):
        marshal.dump((path, item), stdout)
        return True

    try:
        root = parse(stdin,
                     item_depth=item_depth,
                     item_callback=handle_item,
                     dict_constructor=dict)
        if item_depth == 0:
            # Non-streaming mode: emit the whole document once.
            handle_item([], root)
    except KeyboardInterrupt:
        pass
t as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_unicode = unicode
except NameError:
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
    """Raised to stop streaming parsing when item_callback returns a falsy value."""
    pass
class _DictSAXHandler(object):
    def __init__(self,
                 item_depth=0,
                 item_callback=lambda *args: True,
                 xml_attribs=True,
                 attr_prefix='@',
                 cdata_key='#text',
                 force_cdata=False,
                 cdata_separator='',
                 postprocessor=None,
                 dict_constructor=OrderedDict,
                 strip_whitespace=True,
                 namespace_separator=':',
                 namespaces=None,
                 force_list=None):
        """Store the parsing configuration and reset the traversal state."""
        # path: (name, attrs) pairs from the root to the current element;
        # stack: saved (item, data) state of enclosing elements;
        # data: character-data chunks of the current element.
        self.path = []
        self.stack = []
        self.data = []
        self.item = None
        self.item_depth = item_depth
        self.xml_attribs = xml_attribs
        self.item_callback = item_callback
        self.attr_prefix = attr_prefix
        self.cdata_key = cdata_key
        self.force_cdata = force_cdata
        self.cdata_separator = cdata_separator
        self.postprocessor = postprocessor
        self.dict_constructor = dict_constructor
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        # xmlns declarations collected since the last element start.
        self.namespace_declarations = OrderedDict()
        self.force_list = force_list
def _build_name(self, full_name):
if not self.namespaces:
return full_name
i = full_name.rfind(self.namespace_separator)
if i == -1:
return full_name
namespace, name = full_name[:i], full_name[i+1:]
short_namespace = self.namespaces.get(namespace, namespace)
if not short_namespace:
return name
else:
return self.namespace_separator.join((short_namespace, name))
def _attrs_to_dict(self, attrs):
if isinstance(attrs, dict):
return attrs
return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
def startNamespaceDecl(self, prefix, uri):
self.namespace_declarations[prefix or ''] = uri
def startElement(self, full_name, attrs):
name = self._build_name(full_name)
attrs = self._attrs_to_dict(attrs)
if attrs and self.namespace_declarations:
attrs['xmlns'] = self.namespace_declarations
self.namespace_declarations = OrderedDict()
self.path.append((name, attrs or None))
if len(self.path) > self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attr_entries = []
for key, value in attrs.items():
key = self.attr_prefix+self._build_name(key)
if self.postprocessor:
entry = self.postprocessor(self.path, key, value)
else:
entry = (key, value)
if entry:
attr_entries.append(entry)
attrs = self.dict_constructor(attr_entries)
else:
attrs = None
self.item = attrs or None
self.data = []
def endElement(self, full_name):
name = self._build_name(full_name)
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = (None if not self.data
else self.cdata_separator.join(self.data))
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if len(self.stack):
data = (None if not self.data
else self.cdata_separator.join(self.data))
item = self.item
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = None
self.data = []
self.path.pop()
def characters(self, data):
if not self.data:
self.data = [data]
else:
self.data.append(data)
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
if self._should_force_list(key, data):
item[key] = [data]
else:
item[key] = data
return item
def _should_force_list(self, key, value):
if not self.force_list:
return False
try:
return key in self.force_list
except TypeError:
return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
          namespace_separator=':', disable_entities=True, **kwargs):
    """Parse XML from a (unicode) string, bytes, or file-like object.

    Returns the handler's root item (a dict tree) or, in streaming mode
    (``item_depth`` > 0 passed via ``kwargs``), None after all items have
    been delivered to the item callback.  Extra keyword arguments are
    forwarded to ``_DictSAXHandler``.
    """
    handler = _DictSAXHandler(namespace_separator=namespace_separator,
                              **kwargs)
    if isinstance(xml_input, _unicode):
        if not encoding:
            encoding = 'utf-8'
        xml_input = xml_input.encode(encoding)
    if not process_namespaces:
        namespace_separator = None
    parser = expat.ParserCreate(
        encoding,
        namespace_separator
    )
    try:
        parser.ordered_attributes = True
    except AttributeError:
        # Jython's expat does not support ordered_attributes.
        pass
    parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
    parser.StartElementHandler = handler.startElement
    parser.EndElementHandler = handler.endElement
    parser.CharacterDataHandler = handler.characters
    parser.buffer_text = True
    if disable_entities:
        try:
            # Attempt to disable DTD in Jython's expat parser (Xerces-J).
            feature = "http://apache.org/xml/features/disallow-doctype-decl"
            parser._reader.setFeature(feature, True)
        except AttributeError:
            # CPython expat: route unhandled events (incl. entity/DTD
            # declarations) to a no-op so entities are never expanded.
            parser.DefaultHandler = lambda x: None
            # Expects an integer return; zero means failure -> expat.ExpatError.
            parser.ExternalEntityRefHandler = lambda *x: 1
    if hasattr(xml_input, 'read'):
        parser.ParseFile(xml_input)
    else:
        parser.Parse(xml_input, True)
    return handler.item
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
          attr_prefix='@',
          cdata_key='#text',
          depth=0,
          preprocessor=None,
          pretty=False,
          newl='\n',
          indent='\t',
          namespace_separator=':',
          namespaces=None,
          full_document=True):
    """Recursively stream *key*/*value* as SAX events to *content_handler*.

    Fix: the ``cdata_key`` default had been truncated to an unterminated
    string literal (``cdata_key='``) — restored to the canonical
    ``'#text'`` used throughout this module.

    Dict values map to child elements, ``attr_prefix``-keyed entries to
    attributes, and the ``cdata_key`` entry to character data.  Lists emit
    one element per entry.  Raises ValueError for multiple roots when
    ``full_document`` is true.
    """
    key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
    if preprocessor is not None:
        result = preprocessor(key, value)
        if result is None:
            return
        key, value = result
    # Normalize scalars/dicts to a one-element list so single and repeated
    # elements share the same emission path below.
    if (not hasattr(value, '__iter__')
            or isinstance(value, _basestring)
            or isinstance(value, dict)):
        value = [value]
    for index, v in enumerate(value):
        if full_document and depth == 0 and index > 0:
            raise ValueError('document with multiple roots')
        if v is None:
            v = OrderedDict()
        elif isinstance(v, bool):
            # XML-schema style booleans, not Python's 'True'/'False'.
            v = _unicode('true') if v else _unicode('false')
        elif not isinstance(v, dict):
            v = _unicode(v)
        if isinstance(v, _basestring):
            v = OrderedDict(((cdata_key, v),))
        cdata = None
        attrs = OrderedDict()
        children = []
        for ik, iv in v.items():
            if ik == cdata_key:
                cdata = iv
                continue
            if ik.startswith(attr_prefix):
                ik = _process_namespace(ik, namespaces, namespace_separator,
                                        attr_prefix)
                if ik == '@xmlns' and isinstance(iv, dict):
                    # Expand an xmlns mapping into individual declarations.
                    # (Renamed from k/v to avoid shadowing the outer loop's v.)
                    for ns_key, ns_uri in iv.items():
                        attr = 'xmlns{0}'.format(
                            ':{0}'.format(ns_key) if ns_key else '')
                        attrs[attr] = _unicode(ns_uri)
                    continue
                if not isinstance(iv, _unicode):
                    iv = _unicode(iv)
                attrs[ik[len(attr_prefix):]] = iv
                continue
            children.append((ik, iv))
        if pretty:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.startElement(key, AttributesImpl(attrs))
        if pretty and children:
            content_handler.ignorableWhitespace(newl)
        for child_key, child_value in children:
            _emit(child_key, child_value, content_handler,
                  attr_prefix, cdata_key, depth+1, preprocessor,
                  pretty, newl, indent, namespaces=namespaces,
                  namespace_separator=namespace_separator)
        if cdata is not None:
            content_handler.characters(cdata)
        if pretty and children:
            content_handler.ignorableWhitespace(depth * indent)
        content_handler.endElement(key)
        if pretty and depth:
            content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
            short_empty_elements=False,
            **kwargs):
    """Serialize *input_dict* back to XML (the reverse of ``parse``).

    Writes to *output* when given; otherwise renders into a StringIO and
    returns the resulting string.  With ``full_document`` the dict must
    have exactly one root key and an XML declaration is emitted.  Extra
    keyword arguments are forwarded to ``_emit``.
    """
    if full_document and len(input_dict) != 1:
        raise ValueError('Document must have exactly one root.')
    must_return = False
    if output is None:
        output = StringIO()
        must_return = True
    if short_empty_elements:
        # Third positional arg enables <tag/> for empty elements (Python 3).
        content_handler = XMLGenerator(output, encoding, True)
    else:
        content_handler = XMLGenerator(output, encoding)
    if full_document:
        content_handler.startDocument()
    for key, value in input_dict.items():
        _emit(key, value, content_handler, full_document=full_document,
              **kwargs)
    if full_document:
        content_handler.endDocument()
    if must_return:
        value = output.getvalue()
        try:  # pragma no cover
            # Python 2 StringIO may hold bytes; decode to text.
            value = value.decode(encoding)
        except AttributeError:  # pragma no cover
            # Already text (Python 3): nothing to decode.
            pass
        return value
if __name__ == '__main__':  # pragma: no cover
    # CLI helper: stream items parsed at a given depth from XML on stdin,
    # writing each (path, item) pair to stdout with ``marshal``.
    import sys
    import marshal
    try:
        # Python 3: marshal requires the underlying binary buffers.
        stdin = sys.stdin.buffer
        stdout = sys.stdout.buffer
    except AttributeError:
        # Python 2: the std streams are already byte-oriented.
        stdin = sys.stdin
        stdout = sys.stdout
    # Exactly one CLI argument is expected: the streaming item depth.
    (item_depth,) = sys.argv[1:]
    item_depth = int(item_depth)
    def handle_item(path, item):
        # Emit one parsed item; returning True tells parse() to continue.
        marshal.dump((path, item), stdout)
        return True
    try:
        root = parse(stdin,
                     item_depth=item_depth,
                     item_callback=handle_item,
                     dict_constructor=dict)
        if item_depth == 0:
            # Depth 0 disables streaming: emit the whole document once.
            handle_item([], root)
    except KeyboardInterrupt:
        pass
f72fb28880a0595528ca930a65ee989250ef37de | 410 | py | Python | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | 1 | 2019-12-19T10:37:50.000Z | 2019-12-19T10:37:50.000Z | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | null | null | null | src/index.py | dekokun/aws-lambda-parallelization-factor-test | a9d73cd15a3549e1b908cf4504c136173f54839c | [
"MIT"
] | null | null | null | import base64
import json
import time
def lambda_handler(event, context):
    """Decode and print every Kinesis record in *event*, then report the count.

    Each record's base64 payload is decoded as UTF-8 and printed; a one-second
    pause follows each record.  Returns a summary string with the record count.
    """
    print('start handler')
    records = event['Records']
    total = len(records)
    print('Get record count:')
    print(total)
    for rec in records:
        decoded = base64.b64decode(rec['kinesis']['data']).decode("utf-8")
        print("Payload: " + decoded)
        # Deliberate throttle: process at most ~1 record per second.
        time.sleep(1)
    return 'Successfully {} records.'.format(total)
| 24.117647 | 77 | 0.643902 | import base64
import json
import time
def lambda_handler(event, context):
print ('start handler')
count = len(event['Records'])
print ('Get record count:')
print (count)
for record in event['Records']:
payload = base64.b64decode(record['kinesis']['data']).decode("utf-8")
print("Payload: " + payload)
time.sleep(1)
return 'Successfully {} records.'.format(count)
| true | true |
f72fb295397c8e42dff827498e5277948595c2fd | 926 | py | Python | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/Stories | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | 1 | 2020-12-29T09:06:37.000Z | 2020-12-29T09:06:37.000Z | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/blog | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | null | null | null | backend/migrations/versions/d2f3d6010615_modify_message_table.py | ClassesOver/blog | 419eb30b1afe053772be5e3e65b0cd1038eb0c81 | [
"Apache-2.0"
] | null | null | null | """modify message table
Revision ID: d2f3d6010615
Revises: fbb3ebcf5f90
Create Date: 2020-12-24 11:56:01.558233
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd2f3d6010615'
down_revision = 'fbb3ebcf5f90'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable polymorphic-reference columns (res_id, res_model) to message."""
    with op.batch_alter_table('message', schema=None) as message_batch:
        message_batch.add_column(
            sa.Column('res_id', sa.Integer(), nullable=True))
        message_batch.add_column(
            sa.Column('res_model', sa.String(length=16), nullable=True))
def downgrade():
    """Remove the polymorphic-reference columns added by this revision."""
    with op.batch_alter_table('message', schema=None) as message_batch:
        # Drop in reverse order of creation.
        message_batch.drop_column('res_model')
        message_batch.drop_column('res_id')
| 26.457143 | 88 | 0.698704 | from alembic import op
import sqlalchemy as sa
revision = 'd2f3d6010615'
down_revision = 'fbb3ebcf5f90'
branch_labels = None
depends_on = None
def upgrade():
umn('res_model', sa.String(length=16), nullable=True))
| true | true |
f72fb2fc1f4ca1f1629f405f76b905ba489cd3be | 1,136 | py | Python | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | app/core/tests/test_model.py | burakkirlaroglu/recipe-app-api | 0953921bce860502746af4447fb90136e7070faf | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successfully(self):
email = 'test@test.com'
password = '12345'
user = get_user_model().objects.create_user(
email=email,
password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '1234')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='1234')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(email='test@test.com',
password='123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 32.457143 | 79 | 0.649648 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successfully(self):
email = 'test@test.com'
password = '12345'
user = get_user_model().objects.create_user(
email=email,
password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '1234')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(email=None, password='1234')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(email='test@test.com',
password='123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| true | true |
f72fb3a7fe0c4c14b9ceafe1c2d3cdb20fb8fa98 | 1,358 | py | Python | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | src/araugment/augment.py | ashaheedq/araugment | 53436c0c00c924f80238f9c4ae09c9966953e4d1 | [
"MIT"
] | null | null | null | # coding:utf-8
# author Abdulshaheed Alqunber
# version : 1.0.0
from google_trans_new import google_translator
import markovify as mk
def back_translate(text, language_src="ar", language_dst="zh"):
"""Translate text to a foreign language then translate back to original language to augment data
Parameters:
text (string): non-empty string
original: language of input text, must match the format in this link
https://github.com/lushan88a/google_trans_new/blob/main/constant.py
language: language in which the text is going to be translated to
Returns:
string: the back translated text.
"""
try:
t = google_translator()
# translate to target language
translated_text = t.translate(text.strip(), language_dst)
# translate to orignal language
translated_back = t.translate(translated_text, language_src)
return translated_back
# failed to translate, return original
except:
return text
def markov(document, n):
    """Use a Markov chain trained on *document* to produce *n* new sentences.

    Parameters:
        document (list): list of sentences

    Returns:
        list: n newly generated sentences
        (NOTE(review): markovify's make_sentence can presumably return None
        when no sentence can be built — confirm before downstream use.)
    """
    chain = mk.Text(document)
    return [chain.make_sentence() for _ in range(n)]
| 31.581395 | 110 | 0.700295 |
from google_trans_new import google_translator
import markovify as mk
def back_translate(text, language_src="ar", language_dst="zh"):
try:
t = google_translator()
translated_text = t.translate(text.strip(), language_dst)
translated_back = t.translate(translated_text, language_src)
return translated_back
except:
return text
def markov(document, n):
text_model = mk.Text(document)
return [text_model.make_sentence() for i in range(n)]
| true | true |
f72fb5cbe866769c53578f3efd0203eed8351817 | 11,982 | py | Python | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 24 | 2021-10-14T03:28:28.000Z | 2022-03-29T09:30:04.000Z | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-12-14T15:04:49.000Z | 2022-02-19T09:54:42.000Z | configs/visualgenome_kr/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x.py | yizhe-ang/MMSceneGraph | d4daec3d7930d6fe1efe75b9c0a265c8be0b70ba | [
"MIT"
] | 4 | 2021-10-31T11:23:06.000Z | 2021-12-17T06:38:50.000Z | # dataset settings
dataset_type = 'VisualGenomeKRDataset'
data_root = 'data/visualgenomekr/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
# Since the forward process may need gt info, annos must be loaded.
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# NOTE: Do not change the img to DC.
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
test_kr_pipeline = [
dict(type='LoadImageFromFile'),
# Since the forward process may need gt info, annos must be loaded.
dict(type='LoadAnnotations', with_bbox=True, with_rel=True, with_keyrel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
# NOTE: Do not change the img to DC.
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=train_pipeline,
num_im=-1,
num_val_im=1000,
split='train',
img_prefix=data_root + 'Images/'),
val=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
num_val_im=1000,
split='val',
img_prefix=data_root + 'Images/'),
test=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'Images/'),
test_kr=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_kr_pipeline,
num_im=-1,
split='test',
split_type='withkey',
img_prefix=data_root + 'Images/'))
# model settings
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VG_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=201,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
# embedded a saliency detector
saliency_detector=dict(
type='SCRNSaliencyDetector',
pretrained='./saliency_experiments/SOC_SCRN/latest.pth',
eval_mode=True,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch')),
relation_head=dict(
type='MotifHead',
dataset_config=dataset_config,
num_classes=201,
num_predicates=81,
use_bias=True,
head_config=dict(
use_gt_box=True,
use_gt_label=True,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=1,
glove_dir='data/glove/',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
# mask_roi_layer=dict(type='ShapeAwareRoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
require_overlap=False, # for sgdet training, not require
num_sample_per_gt_rel=4,
num_rel_per_image=128,
pos_fraction=0.25,
test_overlap=True # for testing
),
# relation ranker
relation_ranker=dict(
type='LinearRanker', # LinearRanker-KLDiv 10; LSTMRanker-KLdiv 1000; TransformerRanker 1000
comb_factor=0.8,
area_form='rect',
loss=dict(
type='KLDivLoss', loss_weight=10),
input_dim=4096),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50, # Follow the setting in TDE, 80 Bboxes are selected.
mask_thr_binary=0.5,
rle_mask_encode=False, # do not transform the mask into rle.
crop_mask=True, # so that the mask shape is the same as bbox, instead of image shape
format_mask_result=False, # do not transform to the result format like bbox
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='predcls', relation_mode=True, classwise=True, key_first=False)
# optimizer
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'saliency_detector'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './new_experiments/VGKR_Detection_faster_rcnn_x101_64x4d_fpn_1x/latest.pth'
# load_mapping = dict(align_dict={'relation_head.bbox_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs',
# 'relation_head.relation_roi_extractor.visual_bbox_head': 'bbox_head.shared_fcs'})
resume_from = None
workflow = [('train', 1), ('val', 1)]
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| 36.867692 | 115 | 0.580204 |
dataset_type = 'VisualGenomeKRDataset'
data_root = 'data/visualgenomekr/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_rels', 'gt_relmaps']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
test_kr_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_rel=True, with_keyrel=True),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_bboxes', 'gt_labels']),
dict(type='ToDataContainer', fields=(dict(key='gt_bboxes'), dict(key='gt_labels'))),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=train_pipeline,
num_im=-1,
num_val_im=1000,
split='train',
img_prefix=data_root + 'Images/'),
val=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
num_val_im=1000,
split='val',
img_prefix=data_root + 'Images/'),
test=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_pipeline,
num_im=-1,
split='test',
img_prefix=data_root + 'Images/'),
test_kr=dict(
type=dataset_type,
roidb_file=data_root + 'VGKR-SGG.h5',
dict_file=data_root + 'VGKR-SGG-dicts.json',
image_file=data_root + 'recsize_image_data.json',
pipeline=test_kr_pipeline,
num_im=-1,
split='test',
split_type='withkey',
img_prefix=data_root + 'Images/'))
dataset_config = data['train'].copy()
dataset_config.update(dict(cache=data_root + 'VG_statistics.cache'))
model = dict(
type='FasterRCNN',
pretrained='checkpoints/mmlab/imnet/resnext101_64x4d-ee2c6f71.pth',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=201,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
saliency_detector=dict(
type='SCRNSaliencyDetector',
pretrained='./saliency_experiments/SOC_SCRN/latest.pth',
eval_mode=True,
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch')),
relation_head=dict(
type='MotifHead',
dataset_config=dataset_config,
num_classes=201,
num_predicates=81,
use_bias=True,
head_config=dict(
use_gt_box=True,
use_gt_label=True,
use_vision=True,
embed_dim=200,
hidden_dim=512,
roi_dim=1024,
context_pooling_dim=4096,
dropout_rate=0.2,
context_object_layer=1,
context_edge_layer=1,
glove_dir='data/glove/',
causal_effect_analysis=False),
bbox_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_roi_extractor=dict(
type='VisualSpatialExtractor',
bbox_roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
with_visual_bbox=True,
with_visual_mask=False,
with_visual_point=False,
with_spatial=True,
separate_spatial=False,
in_channels=256,
fc_out_channels=1024,
featmap_strides=[4, 8, 16, 32]),
relation_sampler=dict(
type='Motif',
pos_iou_thr=0.5,
require_overlap=False,
num_sample_per_gt_rel=4,
num_rel_per_image=128,
pos_fraction=0.25,
test_overlap=True
),
relation_ranker=dict(
type='LinearRanker',
comb_factor=0.8,
area_form='rect',
loss=dict(
type='KLDivLoss', loss_weight=10),
input_dim=4096),
loss_object=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_relation=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=50,
mask_thr_binary=0.5,
rle_mask_encode=False,
crop_mask=True,
format_mask_result=False,
to_tensor=True))
find_unused_parameters = True
evaluation = dict(interval=1, metric='predcls', relation_mode=True, classwise=True, key_first=False)
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001,
freeze_modules=['backbone', 'neck', 'rpn_head', 'bbox_head', 'saliency_detector'])
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[7, 10])
checkpoint_config = dict(interval=1)
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './new_experiments/VGCOCO_PredCls_motif_wS82_linearranker_faster_rcnn_x101_64x4d_fpn_1x'
load_from = './new_experiments/VGKR_Detection_faster_rcnn_x101_64x4d_fpn_1x/latest.pth'
resume_from = None
workflow = [('train', 1), ('val', 1)]
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
dict(type='WandbLoggerHook',
init_kwargs=dict(
project=work_dir.split('/')[-1],
name='train-1',
config=work_dir + '/cfg.yaml'))
])
| true | true |
f72fb606aa4f0da5563e1b53b0ee4a213e3fad46 | 777 | py | Python | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/library-mathematics | fdb4769a56a8fe35ffefb01a79c03cfca1f91958 | [
"Apache-2.0"
] | 5 | 2018-08-20T06:47:24.000Z | 2019-07-15T03:36:52.000Z | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 6 | 2020-01-05T20:18:18.000Z | 2021-10-14T09:36:44.000Z | bindings/python/tools/python/ostk/mathematics/__init__.py | open-space-collective/open-space-toolkit-mathematics | 4b97f97f4aaa87bff848381a3519c6f764461378 | [
"Apache-2.0"
] | 2 | 2020-03-05T18:18:13.000Z | 2020-04-07T17:42:24.000Z | ################################################################################################################################################################
# @project Open Space Toolkit ▸ Mathematics
# @file bindings/python/tools/python/ostk/mathematics/__init__.py
# @author Lucas Brémond <lucas@loftorbital.com>
# @license Apache License 2.0
################################################################################################################################################################
from ostk.core import *
from .OpenSpaceToolkitMathematicsPy import *
################################################################################################################################################################
| 51.8 | 160 | 0.262548 | true | true | |
f72fb630e3bedcdefa0eb57e63dd067409e8bfcd | 440 | py | Python | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | src/core/__init__.py | el-ideal-ideas/MocaCommands | 33dda0b05ca3b34f4855ff264bcf70d016dfb1c0 | [
"MIT"
] | null | null | null | # -- Imports --------------------------------------------------------------------------
from .core import (
VERSION, TOP_DIR, CONFIG_DIR, LOG_DIR, SRC_DIR, STATIC_DIR, STORAGE_DIR, SYSTEM_CONFIG, SANIC_CONFIG, SERVER_CONFIG,
COMMANDS_DIR, COMMANDS_CONFIG, IP_BLACKLIST_FILE, API_KEY_FILE, SCRIPTS_DIR, system_config, commands, ip_blacklist
)
# -------------------------------------------------------------------------- Imports --
| 48.888889 | 120 | 0.511364 |
from .core import (
VERSION, TOP_DIR, CONFIG_DIR, LOG_DIR, SRC_DIR, STATIC_DIR, STORAGE_DIR, SYSTEM_CONFIG, SANIC_CONFIG, SERVER_CONFIG,
COMMANDS_DIR, COMMANDS_CONFIG, IP_BLACKLIST_FILE, API_KEY_FILE, SCRIPTS_DIR, system_config, commands, ip_blacklist
)
| true | true |
f72fb6db38ad0943de7ba102448dbf868e181c11 | 18,487 | py | Python | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | MultiServer.py | Schulzer99/MultiWorld-Utilities | 031f7e6e21dc80290524e51c165cd28ac10747b3 | [
"MIT"
] | null | null | null | import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
    """State for one attached websocket client of the multiworld server."""

    def __init__(self, socket):
        """Wrap *socket* in a fresh, not-yet-authenticated client record."""
        self.socket = socket
        # Index of the next received-item entry to push to this client.
        self.send_index = 0
        # Identity fields are filled in by a successful 'Connect' handshake.
        self.auth = False
        self.name = self.team = self.slot = None
class Context:
    """Shared server state: config, loaded multidata, and live connections."""

    def __init__(self, host, port, password):
        """Create an empty context bound to *host*:*port* with optional *password*."""
        # Network / auth configuration.
        self.host = host
        self.port = port
        self.password = password
        self.server = None
        # Persistence paths (filled in by main()).
        self.data_filename = None
        self.save_filename = None
        self.disable_save = False
        # Loaded multidata.
        self.player_names = {}
        self.rom_names = {}
        self.remote_items = set()
        self.locations = {}
        # Runtime state.
        self.countdown_timer = 0
        self.clients = []
        self.received_items = {}
async def send_msgs(websocket, msgs):
    """JSON-encode *msgs* and send them on *websocket*; dead links are ignored."""
    # Nothing to do when there is no usable, open connection.
    if not websocket or websocket.closed or not websocket.open:
        return
    try:
        await websocket.send(json.dumps(msgs))
    except websockets.ConnectionClosed:
        # The client vanished mid-send; silently drop the message.
        pass
def broadcast_all(ctx : Context, msgs):
    """Queue *msgs* for delivery to every authenticated client."""
    for target in ctx.clients:
        if not target.auth:
            continue
        asyncio.create_task(send_msgs(target.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
    """Queue *msgs* for every authenticated client on *team* only."""
    for target in ctx.clients:
        if not (target.auth and target.team == team):
            continue
        asyncio.create_task(send_msgs(target.socket, msgs))
def notify_all(ctx : Context, text):
    """Log *text* server-side and print it to every connected, authed client."""
    # Lazy %-style args keep formatting out of the hot path.
    logging.info("Notice (all): %s", text)
    payload = [['Print', text]]
    broadcast_all(ctx, payload)
def notify_team(ctx : Context, team : int, text : str):
    """Log *text* and print it to every authed client on *team*."""
    logging.info("Notice (Team #%d): %s", team + 1, text)
    payload = [['Print', text]]
    broadcast_team(ctx, team, payload)
def notify_client(client : Client, text : str):
    """Print *text* to one client; a no-op if the client never authenticated."""
    if not client.auth:
        return
    logging.info("Notice (Player %s in team %d): %s", client.name, client.team + 1, text)
    payload = [['Print', text]]
    asyncio.create_task(send_msgs(client.socket, payload))
async def server(websocket, path, ctx : Context):
    """Per-connection websocket handler: register, dispatch frames, clean up.

    Each incoming frame is a JSON list of messages; a message is either
    [cmd] or [cmd, args].  The client is removed from ctx.clients whatever
    way the connection ends.
    """
    client = Client(websocket)
    ctx.clients.append(client)
    try:
        await on_client_connected(ctx, client)
        async for data in websocket:
            for msg in json.loads(data):
                if len(msg) == 1:
                    # NOTE(review): this passes the one-element *list* itself
                    # as cmd (not msg[0]); process_client_cmd then rejects it
                    # as a non-string -- confirm whether msg[0] was intended.
                    cmd = msg
                    args = None
                else:
                    cmd = msg[0]
                    args = msg[1]
                await process_client_cmd(ctx, client, cmd, args)
    except Exception as e:
        # Websocket disconnects are routine; log anything else.
        if not isinstance(e, websockets.WebSocketException):
            logging.exception(e)
    finally:
        await on_client_disconnected(ctx, client)
        ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
    """Greet a fresh connection with the room summary (password flag + roster)."""
    roster = [(c.team, c.slot, c.name) for c in ctx.clients if c.auth]
    room_info = {
        'password': ctx.password is not None,
        'players': roster,
    }
    await send_msgs(client.socket, [['RoomInfo', room_info]])
async def on_client_disconnected(ctx : Context, client : Client):
    """Announce the departure of any client that had fully joined."""
    if not client.auth:
        return
    await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
    """Tell everyone that *client* has entered the game."""
    message = "%s (Team #%d) has joined the game" % (client.name, client.team + 1)
    notify_all(ctx, message)
async def on_client_left(ctx : Context, client : Client):
    """Tell everyone that *client* has left the game."""
    message = "%s (Team #%d) has left the game" % (client.name, client.team + 1)
    notify_all(ctx, message)
async def countdown(ctx : Context, timer):
    """Broadcast a once-per-second countdown of *timer* seconds, then 'GO'.

    If a countdown is already running, just reset its remaining time and
    return -- the existing task keeps ticking, so no second loop starts.
    """
    notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
    if ctx.countdown_timer:
        ctx.countdown_timer = timer
        return
    ctx.countdown_timer = timer
    while ctx.countdown_timer > 0:
        notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
        ctx.countdown_timer -= 1
        await asyncio.sleep(1)
    notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx : Context):
    """Render the authenticated roster, grouped by team, as a single line."""
    players = sorted((c for c in ctx.clients if c.auth),
                     key=lambda c: (c.team, c.slot))
    if not players:
        return 'No player connected'
    parts = ['Team #1: ']
    current_team = 0
    for player in players:
        # Emit a team separator whenever the (sorted) team changes.
        if player.team != current_team:
            parts.append(f':: Team #{player.team + 1}: ')
            current_team = player.team
        parts.append(f'{player.name} ')
    text = ''.join(parts)
    # Strip the trailing space left by the last name.
    return 'Connected players: ' + text[:-1]
def get_received_items(ctx : Context, team, player):
    """Return (creating on first use) the received-items list for a slot."""
    key = (team, player)
    return ctx.received_items.setdefault(key, [])
def tuplize_received_items(items):
    """Flatten ReceivedItem-like objects to plain (item, location, player) tuples."""
    tuplized = []
    for entry in items:
        tuplized.append((entry.item, entry.location, entry.player))
    return tuplized
def send_new_items(ctx : Context):
    """Push any not-yet-delivered received items to each connected client.

    client.send_index records how much of the queue the client already has;
    only the tail past that index is sent, then the index is advanced.
    """
    for client in ctx.clients:
        if not client.auth:
            continue
        items = get_received_items(ctx, client.team, client.slot)
        if len(items) > client.send_index:
            asyncio.create_task(send_msgs(client.socket, [['ReceivedItems', (client.send_index, tuplize_received_items(items)[client.send_index:])]]))
            client.send_index = len(items)
def forfeit_player(ctx : Context, team, slot):
    """Release (team, slot)'s items by registering every location as checked."""
    # Entries whose first value is an int are real, checkable addresses.
    all_locations = [values[0] for values in Regions.location_table.values() if type(values[0]) is int]
    notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
    register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
    """Record location checks made by (team, slot) and deliver items found.

    For every checked location that maps to an item, append a ReceivedItem
    to the target player's queue (unless that exact check was already
    recorded), announce cross-player sends to the team, push queued items
    to connected clients, and persist the multisave when anything changed.
    """
    found_items = False
    for location in locations:
        if (location, slot) in ctx.locations:
            target_item, target_player = ctx.locations[(location, slot)]
            # Only network-delivered items: another player's item, or any
            # item when this slot runs in remote-items mode.
            if target_player != slot or slot in ctx.remote_items:
                found = False
                recvd_items = get_received_items(ctx, team, target_player)
                # Skip if this exact check was delivered before (re-sent checks).
                for recvd_item in recvd_items:
                    if recvd_item.location == location and recvd_item.player == slot:
                        found = True
                        break
                if not found:
                    new_item = ReceivedItem(target_item, location, slot)
                    recvd_items.append(new_item)
                    if slot != target_player:
                        broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
                    logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
                    found_items = True
    send_new_items(ctx)
    if found_items and not ctx.disable_save:
        try:
            # Persist the rom->slot map plus all received items, zlib-compressed JSON.
            with open(ctx.save_filename, "wb") as f:
                jsonstr = json.dumps((list(ctx.rom_names.items()),
                                      [(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
                f.write(zlib.compress(jsonstr.encode("utf-8")))
        except Exception as e:
            logging.exception(e)
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
    """Handle one decoded client message.

    'Connect' performs authentication; every other command ('Sync',
    'LocationChecks', 'LocationScouts', 'Say') requires client.auth.
    Malformed payloads are answered with InvalidCmd / InvalidArguments.
    """
    if type(cmd) is not str:
        await send_msgs(client.socket, [['InvalidCmd']])
        return

    if cmd == 'Connect':
        if not args or type(args) is not dict or \
                'password' not in args or type(args['password']) not in [str, type(None)] or \
                'rom' not in args or type(args['rom']) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
            return

        errors = set()
        if ctx.password is not None and args['password'] != ctx.password:
            errors.add('InvalidPassword')

        if tuple(args['rom']) not in ctx.rom_names:
            errors.add('InvalidRom')
        else:
            team, slot = ctx.rom_names[tuple(args['rom'])]
            # Reject a second login for a slot that is already occupied.
            if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
                errors.add('SlotAlreadyTaken')
            else:
                client.name = ctx.player_names[(team, slot)]
                client.team = team
                client.slot = slot

        if errors:
            await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
        else:
            client.auth = True
            reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
            # Replay the full received-item history on (re)connect.
            items = get_received_items(ctx, client.team, client.slot)
            if items:
                reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
                client.send_index = len(items)
            await send_msgs(client.socket, reply)
            await on_client_joined(ctx, client)

    if not client.auth:
        return

    if cmd == 'Sync':
        items = get_received_items(ctx, client.team, client.slot)
        if items:
            client.send_index = len(items)
            await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])

    if cmd == 'LocationChecks':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
            return
        register_location_checks(ctx, client.team, client.slot, args)

    if cmd == 'LocationScouts':
        if type(args) is not list:
            await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
            return
        locs = []
        for location in args:
            # Location ids are 1-based indexes into Regions.location_table.
            # Fixed: the old check `0 >= location > len(...)` was a chained
            # comparison that could never be true, so out-of-range ids
            # slipped through to the lookup below and crashed the handler.
            if type(location) is not int or not 0 < location <= len(Regions.location_table):
                await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
                return
            loc_name = list(Regions.location_table.keys())[location - 1]
            target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]

            # Report generic dungeon-item ids rather than the internal ones.
            replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
            item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
            if item_type:
                target_item = replacements.get(item_type[0], target_item)

            locs.append([loc_name, location, target_item, target_player])

        logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
        await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])

    if cmd == 'Say':
        if type(args) is not str or not args.isprintable():
            await send_msgs(client.socket, [['InvalidArguments', 'Say']])
            return

        notify_all(ctx, client.name + ': ' + args)

        # In-chat commands available to every player.
        if args.startswith('!players'):
            notify_all(ctx, get_connected_players_string(ctx))
        if args.startswith('!forfeit'):
            forfeit_player(ctx, client.team, client.slot)
        if args.startswith('!countdown'):
            try:
                timer = int(args.split()[1])
            except (IndexError, ValueError):
                timer = 10
            asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
    """Install (or clear, when None) the server password and log the change."""
    ctx.password = password
    if password is not None:
        logging.warning('Password set to ' + password)
    else:
        logging.warning('Password disabled')
async def console(ctx : Context):
    """Interactive admin console: read stdin commands until /exit.

    Supported: /exit, /players, /password, /kick, /forfeitslot,
    /forfeitplayer, /senditem, /hint.  Any line not starting with '/' is
    broadcast to all players as a server message.
    """
    while True:
        input = await aioconsole.ainput()  # NOTE: shadows the builtin input()
        try:
            command = shlex.split(input)
            if not command:
                continue
            if command[0] == '/exit':
                ctx.server.ws_server.close()
                break
            if command[0] == '/players':
                logging.info(get_connected_players_string(ctx))
            if command[0] == '/password':
                set_password(ctx, command[1] if len(command) > 1 else None)
            if command[0] == '/kick' and len(command) > 1:
                # Optional third argument narrows the kick to one team.
                team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
                for client in ctx.clients:
                    if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
                        if client.socket and not client.socket.closed:
                            await client.socket.close()
            if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
                # Either "/forfeitslot team slot" or "/forfeitslot slot" (team 1).
                if len(command) > 2 and command[2].isdigit():
                    team = int(command[1]) - 1
                    slot = int(command[2])
                else:
                    team = 0
                    slot = int(command[1])
                forfeit_player(ctx, team, slot)
            if command[0] == '/forfeitplayer' and len(command) > 1:
                # Forfeits the named player on every team it appears in.
                seeked_player = command[1].lower()
                for (team, slot), name in ctx.player_names.items():
                    if name.lower() == seeked_player:
                        forfeit_player(ctx, team, slot)
            if command[0] == '/senditem' and len(command) > 2:
                # Re-parse the raw line so item names may contain spaces.
                [(player, item)] = re.findall(r'\S* (\S*) (.*)', input)
                if item in Items.item_table:
                    for client in ctx.clients:
                        if client.auth and client.name.lower() == player.lower():
                            new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
                            get_received_items(ctx, client.team, client.slot).append(new_item)
                            notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
                    send_new_items(ctx)
                else:
                    logging.warning("Unknown item: " + item)
            if command[0] == '/hint':
                for (team,slot), name in ctx.player_names.items():
                    if len(command) == 1:
                        print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
                    elif name.lower() == command[1].lower():
                        item = " ".join(command[2:])
                        if item in Items.item_table:
                            seeked_item_id = Items.item_table[item][3]
                            # Scan every placed item for copies owned by this slot.
                            for check, result in ctx.locations.items():
                                item_id, receiving_player = result
                                if receiving_player == slot and item_id == seeked_item_id:
                                    location_id, finding_player = check
                                    name_finder = ctx.player_names[team, finding_player]
                                    hint = f"[Hint]: {name}'s {item} can be found at " \
                                           f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
                                    notify_team(ctx, team, hint)
                        else:
                            logging.warning("Unknown item: " + item)
            if command[0][0] != '/':
                notify_all(ctx, '[Server]: ' + input)
        except:
            # Keep the console alive on any error in a command.
            import traceback
            traceback.print_exc()
async def main():
    """Parse CLI options, load the multidata, restore the multisave and serve.

    Starts the websocket server, then blocks on the interactive admin
    console until /exit.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default=None)
    parser.add_argument('--port', default=38281, type=int)
    parser.add_argument('--password', default=None)
    parser.add_argument('--multidata', default=None)
    parser.add_argument('--savefile', default=None)
    parser.add_argument('--disable_save', default=False, action='store_true')
    parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
    args = parser.parse_args()

    logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))

    ctx = Context(args.host, args.port, args.password)
    ctx.data_filename = args.multidata
    # Fix: honour --savefile; previously the option was parsed but never
    # applied, so the save path was always derived from the multidata name.
    ctx.save_filename = args.savefile

    try:
        if not ctx.data_filename:
            # No --multidata given: ask interactively via a file dialog.
            import tkinter
            import tkinter.filedialog
            root = tkinter.Tk()
            root.withdraw()
            ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))

        with open(ctx.data_filename, 'rb') as f:
            jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
            for team, names in enumerate(jsonobj['names']):
                for player, name in enumerate(names, 1):
                    ctx.player_names[(team, player)] = name
            ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
            ctx.remote_items = set(jsonobj['remote_items'])
            ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
    except Exception as e:
        logging.error('Failed to read multiworld data (%s)' % e)
        return

    # Resolve the advertised address (external lookup when no --host given).
    ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
    logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))

    ctx.disable_save = args.disable_save
    if not ctx.disable_save:
        if not ctx.save_filename:
            ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
        try:
            with open(ctx.save_filename, 'rb') as f:
                jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
                rom_names = jsonobj[0]
                received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
                # Refuse a save that belongs to a different multidata.
                if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
                    raise Exception('Save file mismatch, will start a new game')
                ctx.received_items = received_items
                logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
        except FileNotFoundError:
            logging.error('No save data found, starting a new game')
        except Exception as e:
            logging.info(e)

    ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
    await ctx.server
    await console(ctx)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
        # Drain any tasks (queued notifications etc.) still pending when the
        # server loop exits.  Fix: asyncio.Task.all_tasks() was deprecated in
        # Python 3.7 and removed in 3.9 -- prefer the module-level
        # asyncio.all_tasks() when it exists.
        all_tasks = getattr(asyncio, 'all_tasks', None)
        if all_tasks is None:
            all_tasks = asyncio.Task.all_tasks
        loop.run_until_complete(asyncio.gather(*all_tasks(loop)))
    finally:
        loop.close()
| 42.793981 | 230 | 0.584411 | import argparse
import asyncio
import functools
import json
import logging
import re
import shlex
import urllib.request
import zlib
import ModuleUpdate
ModuleUpdate.update()
import websockets
import aioconsole
import Items
import Regions
from MultiClient import ReceivedItem, get_item_name_from_id, get_location_name_from_address
class Client:
def __init__(self, socket):
self.socket = socket
self.auth = False
self.name = None
self.team = None
self.slot = None
self.send_index = 0
class Context:
def __init__(self, host, port, password):
self.data_filename = None
self.save_filename = None
self.disable_save = False
self.player_names = {}
self.rom_names = {}
self.remote_items = set()
self.locations = {}
self.host = host
self.port = port
self.password = password
self.server = None
self.countdown_timer = 0
self.clients = []
self.received_items = {}
async def send_msgs(websocket, msgs):
if not websocket or not websocket.open or websocket.closed:
return
try:
await websocket.send(json.dumps(msgs))
except websockets.ConnectionClosed:
pass
def broadcast_all(ctx : Context, msgs):
for client in ctx.clients:
if client.auth:
asyncio.create_task(send_msgs(client.socket, msgs))
def broadcast_team(ctx : Context, team, msgs):
for client in ctx.clients:
if client.auth and client.team == team:
asyncio.create_task(send_msgs(client.socket, msgs))
def notify_all(ctx : Context, text):
logging.info("Notice (all): %s" % text)
broadcast_all(ctx, [['Print', text]])
def notify_team(ctx : Context, team : int, text : str):
logging.info("Notice (Team #%d): %s" % (team+1, text))
broadcast_team(ctx, team, [['Print', text]])
def notify_client(client : Client, text : str):
if not client.auth:
return
logging.info("Notice (Player %s in team %d): %s" % (client.name, client.team+1, text))
asyncio.create_task(send_msgs(client.socket, [['Print', text]]))
async def server(websocket, path, ctx : Context):
client = Client(websocket)
ctx.clients.append(client)
try:
await on_client_connected(ctx, client)
async for data in websocket:
for msg in json.loads(data):
if len(msg) == 1:
cmd = msg
args = None
else:
cmd = msg[0]
args = msg[1]
await process_client_cmd(ctx, client, cmd, args)
except Exception as e:
if not isinstance(e, websockets.WebSocketException):
logging.exception(e)
finally:
await on_client_disconnected(ctx, client)
ctx.clients.remove(client)
async def on_client_connected(ctx : Context, client : Client):
await send_msgs(client.socket, [['RoomInfo', {
'password': ctx.password is not None,
'players': [(client.team, client.slot, client.name) for client in ctx.clients if client.auth]
}]])
async def on_client_disconnected(ctx : Context, client : Client):
if client.auth:
await on_client_left(ctx, client)
async def on_client_joined(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has joined the game" % (client.name, client.team + 1))
async def on_client_left(ctx : Context, client : Client):
notify_all(ctx, "%s (Team #%d) has left the game" % (client.name, client.team + 1))
async def countdown(ctx : Context, timer):
notify_all(ctx, f'[Server]: Starting countdown of {timer}s')
if ctx.countdown_timer:
ctx.countdown_timer = timer
return
ctx.countdown_timer = timer
while ctx.countdown_timer > 0:
notify_all(ctx, f'[Server]: {ctx.countdown_timer}')
ctx.countdown_timer -= 1
await asyncio.sleep(1)
notify_all(ctx, f'[Server]: GO')
def get_connected_players_string(ctx : Context):
auth_clients = [c for c in ctx.clients if c.auth]
if not auth_clients:
return 'No player connected'
auth_clients.sort(key=lambda c: (c.team, c.slot))
current_team = 0
text = 'Team #1: '
for c in auth_clients:
if c.team != current_team:
text += f':: Team #{c.team + 1}: '
current_team = c.team
text += f'{c.name} '
return 'Connected players: ' + text[:-1]
def get_received_items(ctx : Context, team, player):
return ctx.received_items.setdefault((team, player), [])
def tuplize_received_items(items):
return [(item.item, item.location, item.player) for item in items]
def send_new_items(ctx : Context):
for client in ctx.clients:
if not client.auth:
continue
items = get_received_items(ctx, client.team, client.slot)
if len(items) > client.send_index:
asyncio.create_task(send_msgs(client.socket, [['ReceivedItems', (client.send_index, tuplize_received_items(items)[client.send_index:])]]))
client.send_index = len(items)
def forfeit_player(ctx : Context, team, slot):
all_locations = [values[0] for values in Regions.location_table.values() if type(values[0]) is int]
notify_all(ctx, "%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
register_location_checks(ctx, team, slot, all_locations)
def register_location_checks(ctx : Context, team, slot, locations):
found_items = False
for location in locations:
if (location, slot) in ctx.locations:
target_item, target_player = ctx.locations[(location, slot)]
if target_player != slot or slot in ctx.remote_items:
found = False
recvd_items = get_received_items(ctx, team, target_player)
for recvd_item in recvd_items:
if recvd_item.location == location and recvd_item.player == slot:
found = True
break
if not found:
new_item = ReceivedItem(target_item, location, slot)
recvd_items.append(new_item)
if slot != target_player:
broadcast_team(ctx, team, [['ItemSent', (slot, location, target_player, target_item)]])
logging.info('(Team #%d) %s sent %s to %s (%s)' % (team+1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item), ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
found_items = True
send_new_items(ctx)
if found_items and not ctx.disable_save:
try:
with open(ctx.save_filename, "wb") as f:
jsonstr = json.dumps((list(ctx.rom_names.items()),
[(k, [i.__dict__ for i in v]) for k, v in ctx.received_items.items()]))
f.write(zlib.compress(jsonstr.encode("utf-8")))
except Exception as e:
logging.exception(e)
async def process_client_cmd(ctx : Context, client : Client, cmd, args):
if type(cmd) is not str:
await send_msgs(client.socket, [['InvalidCmd']])
return
if cmd == 'Connect':
if not args or type(args) is not dict or \
'password' not in args or type(args['password']) not in [str, type(None)] or \
'rom' not in args or type(args['rom']) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'Connect']])
return
errors = set()
if ctx.password is not None and args['password'] != ctx.password:
errors.add('InvalidPassword')
if tuple(args['rom']) not in ctx.rom_names:
errors.add('InvalidRom')
else:
team, slot = ctx.rom_names[tuple(args['rom'])]
if any([c.slot == slot and c.team == team for c in ctx.clients if c.auth]):
errors.add('SlotAlreadyTaken')
else:
client.name = ctx.player_names[(team, slot)]
client.team = team
client.slot = slot
if errors:
await send_msgs(client.socket, [['ConnectionRefused', list(errors)]])
else:
client.auth = True
reply = [['Connected', [(client.team, client.slot), [(p, n) for (t, p), n in ctx.player_names.items() if t == client.team]]]]
items = get_received_items(ctx, client.team, client.slot)
if items:
reply.append(['ReceivedItems', (0, tuplize_received_items(items))])
client.send_index = len(items)
await send_msgs(client.socket, reply)
await on_client_joined(ctx, client)
if not client.auth:
return
if cmd == 'Sync':
items = get_received_items(ctx, client.team, client.slot)
if items:
client.send_index = len(items)
await send_msgs(client.socket, [['ReceivedItems', (0, tuplize_received_items(items))]])
if cmd == 'LocationChecks':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationChecks']])
return
register_location_checks(ctx, client.team, client.slot, args)
if cmd == 'LocationScouts':
if type(args) is not list:
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
locs = []
for location in args:
if type(location) is not int or 0 >= location > len(Regions.location_table):
await send_msgs(client.socket, [['InvalidArguments', 'LocationScouts']])
return
loc_name = list(Regions.location_table.keys())[location - 1]
target_item, target_player = ctx.locations[(Regions.location_table[loc_name][0], client.slot)]
replacements = {'SmallKey': 0xA2, 'BigKey': 0x9D, 'Compass': 0x8D, 'Map': 0x7D}
item_type = [i[2] for i in Items.item_table.values() if type(i[3]) is int and i[3] == target_item]
if item_type:
target_item = replacements.get(item_type[0], target_item)
locs.append([loc_name, location, target_item, target_player])
logging.info(f"{client.name} in team {client.team+1} scouted {', '.join([l[0] for l in locs])}")
await send_msgs(client.socket, [['LocationInfo', [l[1:] for l in locs]]])
if cmd == 'Say':
if type(args) is not str or not args.isprintable():
await send_msgs(client.socket, [['InvalidArguments', 'Say']])
return
notify_all(ctx, client.name + ': ' + args)
if args.startswith('!players'):
notify_all(ctx, get_connected_players_string(ctx))
if args.startswith('!forfeit'):
forfeit_player(ctx, client.team, client.slot)
if args.startswith('!countdown'):
try:
timer = int(args.split()[1])
except (IndexError, ValueError):
timer = 10
asyncio.create_task(countdown(ctx, timer))
def set_password(ctx : Context, password):
ctx.password = password
logging.warning('Password set to ' + password if password is not None else 'Password disabled')
async def console(ctx : Context):
while True:
input = await aioconsole.ainput()
try:
command = shlex.split(input)
if not command:
continue
if command[0] == '/exit':
ctx.server.ws_server.close()
break
if command[0] == '/players':
logging.info(get_connected_players_string(ctx))
if command[0] == '/password':
set_password(ctx, command[1] if len(command) > 1 else None)
if command[0] == '/kick' and len(command) > 1:
team = int(command[2]) - 1 if len(command) > 2 and command[2].isdigit() else None
for client in ctx.clients:
if client.auth and client.name.lower() == command[1].lower() and (team is None or team == client.team):
if client.socket and not client.socket.closed:
await client.socket.close()
if command[0] == '/forfeitslot' and len(command) > 1 and command[1].isdigit():
if len(command) > 2 and command[2].isdigit():
team = int(command[1]) - 1
slot = int(command[2])
else:
team = 0
slot = int(command[1])
forfeit_player(ctx, team, slot)
if command[0] == '/forfeitplayer' and len(command) > 1:
seeked_player = command[1].lower()
for (team, slot), name in ctx.player_names.items():
if name.lower() == seeked_player:
forfeit_player(ctx, team, slot)
if command[0] == '/senditem' and len(command) > 2:
[(player, item)] = re.findall(r'\S* (\S*) (.*)', input)
if item in Items.item_table:
for client in ctx.clients:
if client.auth and client.name.lower() == player.lower():
new_item = ReceivedItem(Items.item_table[item][3], "cheat console", client.slot)
get_received_items(ctx, client.team, client.slot).append(new_item)
notify_all(ctx, 'Cheat console: sending "' + item + '" to ' + client.name)
send_new_items(ctx)
else:
logging.warning("Unknown item: " + item)
if command[0] == '/hint':
for (team,slot), name in ctx.player_names.items():
if len(command) == 1:
print("Use /hint {Playername} {itemname}\nFor example /hint Berserker Lamp")
elif name.lower() == command[1].lower():
item = " ".join(command[2:])
if item in Items.item_table:
seeked_item_id = Items.item_table[item][3]
for check, result in ctx.locations.items():
item_id, receiving_player = result
if receiving_player == slot and item_id == seeked_item_id:
location_id, finding_player = check
name_finder = ctx.player_names[team, finding_player]
hint = f"[Hint]: {name}'s {item} can be found at " \
f"{get_location_name_from_address(location_id)} in {name_finder}'s World"
notify_team(ctx, team, hint)
else:
logging.warning("Unknown item: " + item)
if command[0][0] != '/':
notify_all(ctx, '[Server]: ' + input)
except:
import traceback
traceback.print_exc()
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('--host', default=None)
parser.add_argument('--port', default=38281, type=int)
parser.add_argument('--password', default=None)
parser.add_argument('--multidata', default=None)
parser.add_argument('--savefile', default=None)
parser.add_argument('--disable_save', default=False, action='store_true')
parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
args = parser.parse_args()
logging.basicConfig(format='[%(asctime)s] %(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
ctx = Context(args.host, args.port, args.password)
ctx.data_filename = args.multidata
try:
if not ctx.data_filename:
import tkinter
import tkinter.filedialog
root = tkinter.Tk()
root.withdraw()
ctx.data_filename = tkinter.filedialog.askopenfilename(filetypes=(("Multiworld data","*multidata"),))
with open(ctx.data_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
for team, names in enumerate(jsonobj['names']):
for player, name in enumerate(names, 1):
ctx.player_names[(team, player)] = name
ctx.rom_names = {tuple(rom): (team, slot) for slot, team, rom in jsonobj['roms']}
ctx.remote_items = set(jsonobj['remote_items'])
ctx.locations = {tuple(k): tuple(v) for k, v in jsonobj['locations']}
except Exception as e:
logging.error('Failed to read multiworld data (%s)' % e)
return
ip = urllib.request.urlopen('https://v4.ident.me').read().decode('utf8') if not ctx.host else ctx.host
logging.info('Hosting game at %s:%d (%s)' % (ip, ctx.port, 'No password' if not ctx.password else 'Password: %s' % ctx.password))
ctx.disable_save = args.disable_save
if not ctx.disable_save:
if not ctx.save_filename:
ctx.save_filename = (ctx.data_filename[:-9] if ctx.data_filename[-9:] == 'multidata' else (ctx.data_filename + '_')) + 'multisave'
try:
with open(ctx.save_filename, 'rb') as f:
jsonobj = json.loads(zlib.decompress(f.read()).decode("utf-8"))
rom_names = jsonobj[0]
received_items = {tuple(k): [ReceivedItem(**i) for i in v] for k, v in jsonobj[1]}
if not all([ctx.rom_names[tuple(rom)] == (team, slot) for rom, (team, slot) in rom_names]):
raise Exception('Save file mismatch, will start a new game')
ctx.received_items = received_items
logging.info('Loaded save file with %d received items for %d players' % (sum([len(p) for p in received_items.values()]), len(received_items)))
except FileNotFoundError:
logging.error('No save data found, starting a new game')
except Exception as e:
logging.info(e)
ctx.server = websockets.serve(functools.partial(server,ctx=ctx), ctx.host, ctx.port, ping_timeout=None, ping_interval=None)
await ctx.server
await console(ctx)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.run_until_complete(asyncio.gather(*asyncio.Task.all_tasks()))
loop.close()
| true | true |
f72fb70520827d455a85f96859c23ec6801cf6f3 | 5,184 | py | Python | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 1 | 2021-11-16T00:58:43.000Z | 2021-11-16T00:58:43.000Z | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 16 | 2021-05-17T19:24:47.000Z | 2021-12-15T13:57:34.000Z | tests/test_entrypoint.py | tdilauro/circulation-core | 8086ca8cbedd5f4b2a0c44df97889d078ff79aac | [
"Apache-2.0"
] | 1 | 2021-05-12T19:11:52.000Z | 2021-05-12T19:11:52.000Z | import json
import pytest
from ..entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
MediumEntryPoint,
)
from ..external_search import Filter
from ..model import Edition, Work
from ..testing import DatabaseTest
class TestEntryPoint(DatabaseTest):
def test_defaults(self):
everything, ebooks, audiobooks = EntryPoint.ENTRY_POINTS
assert EverythingEntryPoint == everything
assert EbooksEntryPoint == ebooks
assert AudiobooksEntryPoint == audiobooks
display = EntryPoint.DISPLAY_TITLES
assert "eBooks" == display[ebooks]
assert "Audiobooks" == display[audiobooks]
assert Edition.BOOK_MEDIUM == EbooksEntryPoint.INTERNAL_NAME
assert Edition.AUDIO_MEDIUM == AudiobooksEntryPoint.INTERNAL_NAME
assert "http://schema.org/CreativeWork" == everything.URI
for ep in (EbooksEntryPoint, AudiobooksEntryPoint):
assert ep.URI == Edition.medium_to_additional_type[ep.INTERNAL_NAME]
def test_no_changes(self):
# EntryPoint doesn't modify queries or search filters.
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
def test_register(self):
class Mock(object):
pass
args = [Mock, "Mock!"]
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "must define INTERNAL_NAME" in str(excinfo.value)
# Test successful registration.
Mock.INTERNAL_NAME = "a name"
EntryPoint.register(*args)
assert Mock in EntryPoint.ENTRY_POINTS
assert "Mock!" == EntryPoint.DISPLAY_TITLES[Mock]
assert Mock not in EntryPoint.DEFAULT_ENABLED
# Can't register twice.
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "Duplicate entry point internal name: a name" in str(excinfo.value)
EntryPoint.unregister(Mock)
# Test successful registration as a default-enabled entry point.
EntryPoint.register(*args, default_enabled=True)
assert Mock in EntryPoint.DEFAULT_ENABLED
# Can't register two different entry points with the same
# display name.
class Mock2(object):
INTERNAL_NAME = "mock2"
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(Mock2, "Mock!")
assert "Duplicate entry point display name: Mock!" in str(excinfo.value)
EntryPoint.unregister(Mock)
assert Mock not in EntryPoint.DEFAULT_ENABLED
class TestEverythingEntryPoint(DatabaseTest):
def test_no_changes(self):
# EverythingEntryPoint doesn't modify queries or searches
# beyond the default behavior for any entry point.
assert "All" == EverythingEntryPoint.INTERNAL_NAME
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
class TestMediumEntryPoint(DatabaseTest):
def test_modify_database_query(self):
# Create a video, and a entry point that contains videos.
work = self._work(with_license_pool=True)
work.license_pools[0].presentation_edition.medium = Edition.VIDEO_MEDIUM
class Videos(MediumEntryPoint):
INTERNAL_NAME = Edition.VIDEO_MEDIUM
qu = self._db.query(Work)
# The default entry points filter out the video.
for entrypoint in EbooksEntryPoint, AudiobooksEntryPoint:
modified = entrypoint.modify_database_query(self._db, qu)
assert [] == modified.all()
# But the video entry point includes it.
videos = Videos.modify_database_query(self._db, qu)
assert [work.id] == [x.id for x in videos]
def test_modify_search_filter(self):
class Mock(MediumEntryPoint):
INTERNAL_NAME = object()
filter = Filter(media=object())
Mock.modify_search_filter(filter)
assert [Mock.INTERNAL_NAME] == filter.media
class TestLibrary(DatabaseTest):
"""Test a Library's interaction with EntryPoints."""
def test_enabled_entrypoints(self):
l = self._default_library
setting = l.setting(EntryPoint.ENABLED_SETTING)
# When the value is not set, the default is used.
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
setting.value = None
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
# Names that don't correspond to registered entry points are
# ignored. Names that do are looked up.
setting.value = json.dumps(
["no such entry point", AudiobooksEntryPoint.INTERNAL_NAME]
)
assert [AudiobooksEntryPoint] == list(l.entrypoints)
# An empty list is a valid value.
setting.value = json.dumps([])
assert [] == list(l.entrypoints)
| 34.331126 | 82 | 0.673032 | import json
import pytest
from ..entrypoint import (
AudiobooksEntryPoint,
EbooksEntryPoint,
EntryPoint,
EverythingEntryPoint,
MediumEntryPoint,
)
from ..external_search import Filter
from ..model import Edition, Work
from ..testing import DatabaseTest
class TestEntryPoint(DatabaseTest):
def test_defaults(self):
everything, ebooks, audiobooks = EntryPoint.ENTRY_POINTS
assert EverythingEntryPoint == everything
assert EbooksEntryPoint == ebooks
assert AudiobooksEntryPoint == audiobooks
display = EntryPoint.DISPLAY_TITLES
assert "eBooks" == display[ebooks]
assert "Audiobooks" == display[audiobooks]
assert Edition.BOOK_MEDIUM == EbooksEntryPoint.INTERNAL_NAME
assert Edition.AUDIO_MEDIUM == AudiobooksEntryPoint.INTERNAL_NAME
assert "http://schema.org/CreativeWork" == everything.URI
for ep in (EbooksEntryPoint, AudiobooksEntryPoint):
assert ep.URI == Edition.medium_to_additional_type[ep.INTERNAL_NAME]
def test_no_changes(self):
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
def test_register(self):
class Mock(object):
pass
args = [Mock, "Mock!"]
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "must define INTERNAL_NAME" in str(excinfo.value)
# Test successful registration.
Mock.INTERNAL_NAME = "a name"
EntryPoint.register(*args)
assert Mock in EntryPoint.ENTRY_POINTS
assert "Mock!" == EntryPoint.DISPLAY_TITLES[Mock]
assert Mock not in EntryPoint.DEFAULT_ENABLED
# Can't register twice.
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(*args)
assert "Duplicate entry point internal name: a name" in str(excinfo.value)
EntryPoint.unregister(Mock)
EntryPoint.register(*args, default_enabled=True)
assert Mock in EntryPoint.DEFAULT_ENABLED
# display name.
class Mock2(object):
INTERNAL_NAME = "mock2"
with pytest.raises(ValueError) as excinfo:
EntryPoint.register(Mock2, "Mock!")
assert "Duplicate entry point display name: Mock!" in str(excinfo.value)
EntryPoint.unregister(Mock)
assert Mock not in EntryPoint.DEFAULT_ENABLED
class TestEverythingEntryPoint(DatabaseTest):
def test_no_changes(self):
# EverythingEntryPoint doesn't modify queries or searches
assert "All" == EverythingEntryPoint.INTERNAL_NAME
qu = self._db.query(Edition)
assert qu == EntryPoint.modify_database_query(self._db, qu)
args = dict(arg="value")
filter = object()
assert filter == EverythingEntryPoint.modify_search_filter(filter)
class TestMediumEntryPoint(DatabaseTest):
def test_modify_database_query(self):
work = self._work(with_license_pool=True)
work.license_pools[0].presentation_edition.medium = Edition.VIDEO_MEDIUM
class Videos(MediumEntryPoint):
INTERNAL_NAME = Edition.VIDEO_MEDIUM
qu = self._db.query(Work)
for entrypoint in EbooksEntryPoint, AudiobooksEntryPoint:
modified = entrypoint.modify_database_query(self._db, qu)
assert [] == modified.all()
videos = Videos.modify_database_query(self._db, qu)
assert [work.id] == [x.id for x in videos]
def test_modify_search_filter(self):
class Mock(MediumEntryPoint):
INTERNAL_NAME = object()
filter = Filter(media=object())
Mock.modify_search_filter(filter)
assert [Mock.INTERNAL_NAME] == filter.media
class TestLibrary(DatabaseTest):
def test_enabled_entrypoints(self):
l = self._default_library
setting = l.setting(EntryPoint.ENABLED_SETTING)
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
setting.value = None
assert EntryPoint.DEFAULT_ENABLED == list(l.entrypoints)
# ignored. Names that do are looked up.
setting.value = json.dumps(
["no such entry point", AudiobooksEntryPoint.INTERNAL_NAME]
)
assert [AudiobooksEntryPoint] == list(l.entrypoints)
# An empty list is a valid value.
setting.value = json.dumps([])
assert [] == list(l.entrypoints)
| true | true |
f72fb746949f5509b0ded3a7b05fc061d6d0201a | 879 | bzl | Python | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 6 | 2021-03-30T07:42:04.000Z | 2022-03-23T02:42:36.000Z | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | third_party/farmhash/workspace.bzl | dmpiergiacomo/tensorflow | 0ecdc6dc2dbc2381c9317f274bd39281294dfc97 | [
"Apache-2.0"
] | 6 | 2016-09-07T04:00:15.000Z | 2022-01-12T01:47:38.000Z | """Provides the repository macro to import farmhash."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports farmhash."""
# Attention: tools parse and update these lines.
FARMHASH_COMMIT = "816a4ae622e964763ca0862d9dbd19324a1eaf45"
FARMHASH_SHA256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0"
tf_http_archive(
name = "farmhash_archive",
build_file = "//third_party/farmhash:farmhash.BUILD",
sha256 = FARMHASH_SHA256,
strip_prefix = "farmhash-{commit}".format(commit = FARMHASH_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
"https://github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
],
)
| 39.954545 | 151 | 0.697383 |
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
FARMHASH_COMMIT = "816a4ae622e964763ca0862d9dbd19324a1eaf45"
FARMHASH_SHA256 = "6560547c63e4af82b0f202cb710ceabb3f21347a4b996db565a411da5b17aba0"
tf_http_archive(
name = "farmhash_archive",
build_file = "//third_party/farmhash:farmhash.BUILD",
sha256 = FARMHASH_SHA256,
strip_prefix = "farmhash-{commit}".format(commit = FARMHASH_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
"https://github.com/google/farmhash/archive/{commit}.tar.gz".format(commit = FARMHASH_COMMIT),
],
)
| true | true |
f72fb7689d8d911e8d95780e9541175223f17521 | 2,251 | py | Python | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | apis/forecast.py | mrasap/powderbooking_backend | ffc6e31f52bd78a18293adbfbb30cd0211a4700c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Michael Kemna.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask_restplus import Namespace, Resource
from powderbooking.models import model_forecast
from sqlalchemy import MetaData
from database import db
from database.query import Query
from utils.convert_models import convert_sqlalchemy_to_restplus_model
api = Namespace('forecast', description='Weather reports of a overview')
forecast = convert_sqlalchemy_to_restplus_model(table=model_forecast(metadata=MetaData()))
api.add_model(name=forecast.name, definition=forecast)
@api.route('/current/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No current forecast report for given overview identifier found')
class ForecastCurrent(Resource):
@api.doc('get_current_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
"""Get the current forecast report from today for the given overview identifier"""
result = db.execute_query(Query.select_forecast_current, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
@api.route('/past/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No past forecast report for given overview identifier found')
class ForecastPast(Resource):
@api.doc('get_past_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
"""Get the past forecast reports of today for the given overview identifier"""
result = db.execute_query(Query.select_forecast_past, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
| 38.810345 | 90 | 0.749889 |
from flask_restplus import Namespace, Resource
from powderbooking.models import model_forecast
from sqlalchemy import MetaData
from database import db
from database.query import Query
from utils.convert_models import convert_sqlalchemy_to_restplus_model
api = Namespace('forecast', description='Weather reports of a overview')
forecast = convert_sqlalchemy_to_restplus_model(table=model_forecast(metadata=MetaData()))
api.add_model(name=forecast.name, definition=forecast)
@api.route('/current/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No current forecast report for given overview identifier found')
class ForecastCurrent(Resource):
@api.doc('get_current_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
result = db.execute_query(Query.select_forecast_current, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
@api.route('/past/<int:resort_id>')
@api.param('resort_id', 'The overview identifier')
@api.response(404, 'No past forecast report for given overview identifier found')
class ForecastPast(Resource):
@api.doc('get_past_forecast_report')
@api.marshal_list_with(fields=forecast)
def get(self, resort_id: int):
result = db.execute_query(Query.select_forecast_past, resort_id=resort_id)
if result.rowcount > 0:
return result.fetchall()
api.abort(404)
| true | true |
f72fb77842fa2f02ed07d03d38852d1b6dade3ed | 1,296 | py | Python | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 7 | 2019-10-04T07:27:41.000Z | 2021-06-07T04:39:18.000Z | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 64 | 2019-10-07T12:40:56.000Z | 2022-02-17T18:44:37.000Z | src/fetchcode/vcs/pip/_internal/utils/pkg_resources.py | quepop/fetchcode | ac2461bdf7a249d8815987b4d421dbc615c043b9 | [
"Apache-2.0"
] | 16 | 2019-10-04T08:48:12.000Z | 2021-06-11T01:22:56.000Z | from fetchcode.vcs.pip._vendor.pkg_resources import yield_lines
from fetchcode.vcs.pip._vendor.six import ensure_str
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List
class DictMetadata(object):
"""IMetadataProvider that reads metadata files from a dictionary.
"""
def __init__(self, metadata):
# type: (Dict[str, bytes]) -> None
self._metadata = metadata
def has_metadata(self, name):
# type: (str) -> bool
return name in self._metadata
def get_metadata(self, name):
# type: (str) -> str
try:
return ensure_str(self._metadata[name])
except UnicodeDecodeError as e:
# Mirrors handling done in pkg_resources.NullProvider.
e.reason += " in {} file".format(name)
raise
def get_metadata_lines(self, name):
# type: (str) -> Iterable[str]
return yield_lines(self.get_metadata(name))
def metadata_isdir(self, name):
# type: (str) -> bool
return False
def metadata_listdir(self, name):
# type: (str) -> List[str]
return []
def run_script(self, script_name, namespace):
# type: (str, str) -> None
pass
| 28.8 | 71 | 0.631944 | from fetchcode.vcs.pip._vendor.pkg_resources import yield_lines
from fetchcode.vcs.pip._vendor.six import ensure_str
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterable, List
class DictMetadata(object):
def __init__(self, metadata):
self._metadata = metadata
def has_metadata(self, name):
return name in self._metadata
def get_metadata(self, name):
try:
return ensure_str(self._metadata[name])
except UnicodeDecodeError as e:
e.reason += " in {} file".format(name)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def metadata_isdir(self, name):
return False
def metadata_listdir(self, name):
return []
def run_script(self, script_name, namespace):
pass
| true | true |
f72fb8bd9479fd40d29757e53cc63e0a56a9ebf3 | 757 | py | Python | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 1 | 2021-02-08T21:22:38.000Z | 2021-02-08T21:22:38.000Z | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | null | null | null | leo/plugins/quit_leo.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | null | null | null | #@+leo-ver=5-thin
#@+node:edream.110203113231.734: * @file ../plugins/quit_leo.py
""" Shows how to force Leo to quit."""
#@@language python
#@@tabwidth -4
from leo.core import leoGlobals as g
def init():
'''Return True if the plugin has loaded successfully.'''
ok = not g.app.unitTesting # Not for unit testing.
if ok:
def forceLeoToQuit(tag, keywords):
if not g.app.initing:
g.pr("forceLeoToQuit", tag)
g.app.forceShutdown()
# Force a shutdown at any other time, even "idle" time.
# Exception: do not call g.app.forceShutdown in a "start2" hook.
g.pr(__doc__)
g.registerHandler("idle", forceLeoToQuit)
g.plugin_signon(__name__)
return ok
#@-leo
| 30.28 | 72 | 0.620872 |
from leo.core import leoGlobals as g
def init():
ok = not g.app.unitTesting
if ok:
def forceLeoToQuit(tag, keywords):
if not g.app.initing:
g.pr("forceLeoToQuit", tag)
g.app.forceShutdown()
g.pr(__doc__)
g.registerHandler("idle", forceLeoToQuit)
g.plugin_signon(__name__)
return ok
| true | true |
f72fb9520e3654537387e3873817fd1158160606 | 1,721 | py | Python | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 27 | 2018-11-23T21:37:14.000Z | 2021-11-22T08:44:35.000Z | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 6 | 2019-07-09T16:26:56.000Z | 2021-05-17T17:29:42.000Z | deeptennis/data/transforms.py | sethah/deeptennis | a689c5f1d6f5ff1d665aec99b8db6262d3442c3a | [
"MIT"
] | 4 | 2019-06-11T06:44:30.000Z | 2021-02-27T14:49:02.000Z | import numpy as np
import albumentations.augmentations.functional as af
from albumentations.core.transforms_interface import DualTransform
from allencv.data.transforms import _ImageTransformWrapper, ImageTransform
class CourtKeypointFlip(DualTransform):
"""Flip the input horizontally around the y-axis.
Args:
p (float): probability of applying the transform. Default: 0.5.
Targets:
image, mask, bboxes, keypoints
Image types:
uint8, float32
"""
def apply(self, img, **params):
if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
# Opencv is faster than numpy only in case of
# non-gray scale 8bits images
return af.hflip_cv2(img)
else:
return af.hflip(img)
def apply_to_bbox(self, bbox, **params):
return af.bbox_hflip(bbox, **params)
def apply_to_keypoint(self, keypoint, **params):
return af.keypoint_hflip(keypoint, **params)
def apply_to_keypoints(self, keypoints, **params):
keypoints = [list(keypoint) for keypoint in keypoints]
kps = [self.apply_to_keypoint(keypoint[:4], **params) + keypoint[4:] for keypoint in keypoints]
# print(kps)
# sorted_x = sorted(kps, key=lambda x: x[0])
# bottom_left = max(sorted_x[:2], key=lambda x: x[1])
# top_left = min(sorted_x[:2], key=lambda x: x[1])
# bottom_right = max(sorted_x[2:], key=lambda x: x[1])
# top_right = min(sorted_x[2:], key=lambda x: x[1])
# tmp = [bottom_left, bottom_right, top_right, top_left]
# print(tmp)
return kps
ImageTransform.register("keypoint_hflip")(_ImageTransformWrapper(CourtKeypointFlip)) | 34.42 | 103 | 0.651365 | import numpy as np
import albumentations.augmentations.functional as af
from albumentations.core.transforms_interface import DualTransform
from allencv.data.transforms import _ImageTransformWrapper, ImageTransform
class CourtKeypointFlip(DualTransform):
def apply(self, img, **params):
if img.ndim == 3 and img.shape[2] > 1 and img.dtype == np.uint8:
return af.hflip_cv2(img)
else:
return af.hflip(img)
def apply_to_bbox(self, bbox, **params):
return af.bbox_hflip(bbox, **params)
def apply_to_keypoint(self, keypoint, **params):
return af.keypoint_hflip(keypoint, **params)
def apply_to_keypoints(self, keypoints, **params):
keypoints = [list(keypoint) for keypoint in keypoints]
kps = [self.apply_to_keypoint(keypoint[:4], **params) + keypoint[4:] for keypoint in keypoints]
return kps
ImageTransform.register("keypoint_hflip")(_ImageTransformWrapper(CourtKeypointFlip)) | true | true |
f72fbad2adffd95d82e895b59a2b7ca1499c137d | 193 | py | Python | python/interpret-core/interpret/greybox/__init__.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 2,674 | 2019-10-03T14:14:35.000Z | 2022-03-31T13:40:49.000Z | python/interpret-core/interpret/greybox/__init__.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 257 | 2019-11-08T19:22:56.000Z | 2022-03-29T20:09:07.000Z | python/interpret-core/interpret/greybox/__init__.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 367 | 2019-10-31T15:33:21.000Z | 2022-03-31T13:40:50.000Z | # Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from .treeinterpreter import TreeInterpreter # noqa: F401
from .shaptree import ShapTree # noqa: F401
| 32.166667 | 58 | 0.792746 |
from .treeinterpreter import TreeInterpreter
from .shaptree import ShapTree
| true | true |
f72fbadd2f5f8ca5d3e6c9fc5dc5627d098e888f | 5,496 | py | Python | source/generate_symbolic_derivatives.py | daccordeon/CEonlyPony | 7af50792a3a28101391397fce1e2b5e01d919701 | [
"BSD-3-Clause"
] | null | null | null | source/generate_symbolic_derivatives.py | daccordeon/CEonlyPony | 7af50792a3a28101391397fce1e2b5e01d919701 | [
"BSD-3-Clause"
] | null | null | null | source/generate_symbolic_derivatives.py | daccordeon/CEonlyPony | 7af50792a3a28101391397fce1e2b5e01d919701 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""Generate symbolic derivatives as lambdified functions for gwbench.
When run as a script: generate all symbolic derivatives for tf2_tidal at all standard locations ahead of benchmarking.
Slurm gets upset when multiple tasks try to create the derivatives if there aren't any there already, so run in series.
Usage:
$ python3 generate_symbolic_derivatives.py
License:
BSD 3-Clause License
Copyright (c) 2022, James Gardner.
All rights reserved except for those for the gwbench code which remain reserved
by S. Borhanian; the gwbench code is included in this repository for convenience.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from typing import List, Set, Dict, Tuple, Optional, Union
import os
from gwbench import wf_class as wfc
from gwbench import detector_response_derivatives as drd
def generate_symbolic_derivatives(
wf_model_name: str,
wf_other_var_dic: Optional[Dict[str, str]],
deriv_symbs_string: str,
locs: List[str],
use_rot: bool,
output_path: Optional[str] = None,
print_progress: bool = True,
) -> None:
"""Generate symbolic derivatives, from generate_lambdified_functions.py from gwbench.
Use network's wf_model_name, wf_other_var_dic, deriv_symbs_string, and use_rot.
Will print 'Done.' when finished unless all files already exist in which it will print as such.
Args:
wf_model_name: Waveform model name.
wf_other_var_dic: Waveform approximant.
deriv_symbs_string: Symbols to take derivatives wrt.
locs: Detector locations.
use_rot: Whether to account for Earth's rotation.
output_path: Output file path.
print_progress: Whether to print progress.
"""
# # how to print settings as a sanity check
# print('wf_model_name = \'{}\''.format(wf.wf_model_name))
# print('wf_other_var_dic = {}'.format(wf.wf_other_var_dic))
# print('deriv_symbs_string = \'{}\''.format(deriv_symbs_string))
# print('use_rot = %i'%use_rot)
# skip if derivatives already exist
file_names = [
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ", "_")
+ "_DET_"
+ key
+ ".dat"
for key in locs
]
file_names.append(
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ra", "")
.replace(" dec", "")
.replace(" psi", "")
.replace(" ", "_")
+ "_DET_"
+ "pl_cr"
+ ".dat"
)
path = "lambdified_functions/"
file_names_existing = [
file_name for file_name in file_names if os.path.isfile(path + file_name)
]
if len(file_names_existing) < len(file_names):
# if a file doesn't exist, generate them all again
# TODO: make this more efficient and just generate the missing files, or, do it in parallel
# waveform
wf = wfc.Waveform(wf_model_name, wf_other_var_dic)
# lambidified detector reponses and derivatives
drd.generate_det_responses_derivs_sym(
wf,
deriv_symbs_string,
locs=locs,
use_rot=use_rot,
user_lambdified_functions_path=output_path,
)
elif print_progress:
print("All lambdified derivatives already exist.")
if __name__ == "__main__":
# tf2_tidal is used as a replacement for numerical BNS simulations until they become well-conditioned
# TODO: make a user input file somewhere to unify the considered waveforms
wf_model_name, wf_other_var_dic = "tf2_tidal", None
deriv_symbs_string = "Mc eta DL tc phic iota ra dec psi"
# TODO: make this automated by using a locs list from networks.py
locs = ["H", "L", "V", "K", "I", "ET1", "ET2", "ET3", "C", "N", "S"]
use_rot = True
generate_symbolic_derivatives(
wf_model_name,
wf_other_var_dic,
deriv_symbs_string,
locs,
use_rot,
print_progress=False,
)
| 39.539568 | 119 | 0.691776 |
from typing import List, Set, Dict, Tuple, Optional, Union
import os
from gwbench import wf_class as wfc
from gwbench import detector_response_derivatives as drd
def generate_symbolic_derivatives(
wf_model_name: str,
wf_other_var_dic: Optional[Dict[str, str]],
deriv_symbs_string: str,
locs: List[str],
use_rot: bool,
output_path: Optional[str] = None,
print_progress: bool = True,
) -> None:
= [
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ", "_")
+ "_DET_"
+ key
+ ".dat"
for key in locs
]
file_names.append(
"par_deriv_WFM_"
+ wf_model_name
+ "_VAR_"
+ deriv_symbs_string.replace(" ra", "")
.replace(" dec", "")
.replace(" psi", "")
.replace(" ", "_")
+ "_DET_"
+ "pl_cr"
+ ".dat"
)
path = "lambdified_functions/"
file_names_existing = [
file_name for file_name in file_names if os.path.isfile(path + file_name)
]
if len(file_names_existing) < len(file_names):
# TODO: make this more efficient and just generate the missing files, or, do it in parallel
# waveform
wf = wfc.Waveform(wf_model_name, wf_other_var_dic)
# lambidified detector reponses and derivatives
drd.generate_det_responses_derivs_sym(
wf,
deriv_symbs_string,
locs=locs,
use_rot=use_rot,
user_lambdified_functions_path=output_path,
)
elif print_progress:
print("All lambdified derivatives already exist.")
if __name__ == "__main__":
# tf2_tidal is used as a replacement for numerical BNS simulations until they become well-conditioned
# TODO: make a user input file somewhere to unify the considered waveforms
wf_model_name, wf_other_var_dic = "tf2_tidal", None
deriv_symbs_string = "Mc eta DL tc phic iota ra dec psi"
# TODO: make this automated by using a locs list from networks.py
locs = ["H", "L", "V", "K", "I", "ET1", "ET2", "ET3", "C", "N", "S"]
use_rot = True
generate_symbolic_derivatives(
wf_model_name,
wf_other_var_dic,
deriv_symbs_string,
locs,
use_rot,
print_progress=False,
)
| true | true |
f72fbb46da1ac696d933485cf3bec183189f023a | 1,181 | py | Python | setup.py | i008/neptune-contrib | 4071c44112da4d7c52ee42cbb1ba937a66e5845b | [
"MIT"
] | null | null | null | setup.py | i008/neptune-contrib | 4071c44112da4d7c52ee42cbb1ba937a66e5845b | [
"MIT"
] | null | null | null | setup.py | i008/neptune-contrib | 4071c44112da4d7c52ee42cbb1ba937a66e5845b | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
def main():
extras = {
'bots': ['python-telegram-bot'],
'hpo': ['scikit-optimize==0.5.2', 'scipy'],
'monitoring': ['scikit-optimize==0.5.2', 'sacred==0.7.5', 'scikit-learn==0.21.3',
'scikit-plot==0.3.7', 'seaborn==0.8.1', 'aif360==0.2.1'],
'versioning': ['boto3', 'numpy'],
'viz': ['altair==2.3.0'],
}
all_deps = []
for group_name in extras:
all_deps += extras[group_name]
extras['all'] = all_deps
base_libs = ['attrdict==2.0.0', 'neptune-client', 'joblib==0.13', 'pandas', 'matplotlib', 'Pillow==5.4.1']
setup(
name='neptune-contrib',
version='0.13.7',
description='Neptune Python library contributions',
author='neptune.ml',
author_email='contact@neptune.ml',
url="https://github.com/neptune-ml/neptune-contrib",
long_description='Neptune Python library contributions',
license='MIT License',
install_requires=base_libs,
extras_require=extras,
packages=find_packages(include=['neptunecontrib*']),
)
if __name__ == "__main__":
main()
| 31.078947 | 110 | 0.580017 | from setuptools import find_packages, setup
def main():
extras = {
'bots': ['python-telegram-bot'],
'hpo': ['scikit-optimize==0.5.2', 'scipy'],
'monitoring': ['scikit-optimize==0.5.2', 'sacred==0.7.5', 'scikit-learn==0.21.3',
'scikit-plot==0.3.7', 'seaborn==0.8.1', 'aif360==0.2.1'],
'versioning': ['boto3', 'numpy'],
'viz': ['altair==2.3.0'],
}
all_deps = []
for group_name in extras:
all_deps += extras[group_name]
extras['all'] = all_deps
base_libs = ['attrdict==2.0.0', 'neptune-client', 'joblib==0.13', 'pandas', 'matplotlib', 'Pillow==5.4.1']
setup(
name='neptune-contrib',
version='0.13.7',
description='Neptune Python library contributions',
author='neptune.ml',
author_email='contact@neptune.ml',
url="https://github.com/neptune-ml/neptune-contrib",
long_description='Neptune Python library contributions',
license='MIT License',
install_requires=base_libs,
extras_require=extras,
packages=find_packages(include=['neptunecontrib*']),
)
if __name__ == "__main__":
main()
| true | true |
f72fbc9ef2815a7c16260374b2af5e47dc631fe1 | 5,099 | py | Python | dev_scripts/chemenv/equivalent_indices.py | frssp/pymatgen | bdd977f065b66191557c7398b31a1571bc541fdb | [
"MIT"
] | 5 | 2019-04-11T20:57:38.000Z | 2021-12-01T05:00:42.000Z | dev_scripts/chemenv/equivalent_indices.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 3 | 2017-07-18T01:13:41.000Z | 2019-04-29T18:17:30.000Z | dev_scripts/chemenv/equivalent_indices.py | darnoceloc/pymatgen | 5cc42912a12a265a603df7e34c856561f76edc1f | [
"MIT"
] | 3 | 2019-10-14T19:47:34.000Z | 2020-07-02T08:10:45.000Z | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Development script of the ChemEnv utility to get the equivalent indices of the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import numpy as np
if __name__ == '__main__':
cg_symbol = 'O:6'
equiv_list = []
# O:6
if cg_symbol == 'O:6':
opposite_points = {0: 1,
1: 0,
2: 3,
3: 2,
4: 5,
5: 4}
perp_plane = {0: [2, 3, 4, 5],
1: [2, 3, 4, 5],
2: [0, 1, 4, 5],
3: [0, 1, 4, 5],
4: [0, 1, 2, 3],
5: [0, 1, 2, 3]}
# 0. any point
for i0 in range(6):
# 1. point opposite to point 0.
i1 = opposite_points[i0]
# 2. one of the 4 points in the perpendicular plane
for i2 in perp_plane[i0]:
# 3. point opposite to point 2.
i3 = opposite_points[i2]
remaining = range(6)
remaining.remove(i0)
remaining.remove(i1)
remaining.remove(i2)
remaining.remove(i3)
# 4. one of the 2 remaining points
for i4 in remaining:
# 5. point opposite to point 4.
i5 = opposite_points[i4]
equiv_list.append([i0, i1, i2, i3, i4, i5])
# PB:7
if cg_symbol == 'PB:7':
for i0 in range(5):
for turn in [1, -1]:
i1 = np.mod(i0+turn, 5)
i2 = np.mod(i1+turn, 5)
i3 = np.mod(i2+turn, 5)
i4 = np.mod(i3+turn, 5)
for i5 in [5, 6]:
i6 = 5 if i5 == 6 else 6
equiv_list.append([i0, i1, i2, i3, i4, i5, i6])
# HB:8
if cg_symbol == 'HB:8':
for i0 in range(6):
for turn in [1, -1]:
i1 = np.mod(i0 + turn, 6)
i2 = np.mod(i1 + turn, 6)
i3 = np.mod(i2 + turn, 6)
i4 = np.mod(i3 + turn, 6)
i5 = np.mod(i4 + turn, 6)
for i6 in [6, 7]:
i7 = 6 if i6 == 7 else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
# SBT:8
if cg_symbol == 'SBT:8':
#0. any point on the square face without cap
for i0 in [0, 1, 3, 4]:
#1. point in this square face but also in the triangular plane of point 0
#2. last point in the triangular plane of point 0
if i0 < 3:
i1 = 0 if i0 == 1 else 1
i2 = 2
else:
i1 = 3 if i0 == 4 else 4
i2 = 5
#3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.
i3 = np.mod(i0 + 3, 6)
i4 = np.mod(i1 + 3, 6)
i5 = np.mod(i2 + 3, 6)
#6. cap point opposite to the first point
i6 = 7 if i0 in [1, 4] else 6
#7. last cap point
i7 = 6 if i0 in [1, 4] else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
# SA:8
if cg_symbol == 'SA:8':
sf1 = [0, 2, 1, 3]
sf2 = [4, 5, 7, 6]
# 0. any point
for i0 in range(8):
# 1. point opposite to point 0. in the square face
if i0 in [0, 2]:
i1 = i0 + 1
elif i0 in [1, 3]:
i1 = i0 - 1
elif i0 == 4:
i1 = 7
elif i0 == 5:
i1 = 6
elif i0 == 6:
i1 = 5
elif i0 == 7:
i1 = 4
# 2. one of the two last points in the square face
sfleft = list(sf1) if i0 in sf1 else list(sf2)
sfleft.remove(i0)
sfleft.remove(i1)
for i2 in sfleft:
sfleft2 = list(sfleft)
sfleft2.remove(i2)
# 3. last point in the square face
i3 = sfleft2[0]
# 4. point opposite to point 3. and closest to point 0.
i4 = 0
# 3.4.5. corresponding points in the opposite triangular plane to the one of points 0.1.2.
i3 = np.mod(i0 + 3, 6)
i4 = np.mod(i1 + 3, 6)
i5 = np.mod(i2 + 3, 6)
# 6. cap point opposite to the first point
i6 = 7 if i0 in [1, 4] else 6
# 7. last cap point
i7 = 6 if i0 in [1, 4] else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
print('Equivalent indices ({:d}) for {} : '.format(len(equiv_list), cg_symbol))
print(equiv_list) | 34.452703 | 110 | 0.442636 |
from __future__ import division, unicode_literals
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import numpy as np
if __name__ == '__main__':
cg_symbol = 'O:6'
equiv_list = []
if cg_symbol == 'O:6':
opposite_points = {0: 1,
1: 0,
2: 3,
3: 2,
4: 5,
5: 4}
perp_plane = {0: [2, 3, 4, 5],
1: [2, 3, 4, 5],
2: [0, 1, 4, 5],
3: [0, 1, 4, 5],
4: [0, 1, 2, 3],
5: [0, 1, 2, 3]}
for i0 in range(6):
i1 = opposite_points[i0]
for i2 in perp_plane[i0]:
i3 = opposite_points[i2]
remaining = range(6)
remaining.remove(i0)
remaining.remove(i1)
remaining.remove(i2)
remaining.remove(i3)
for i4 in remaining:
i5 = opposite_points[i4]
equiv_list.append([i0, i1, i2, i3, i4, i5])
if cg_symbol == 'PB:7':
for i0 in range(5):
for turn in [1, -1]:
i1 = np.mod(i0+turn, 5)
i2 = np.mod(i1+turn, 5)
i3 = np.mod(i2+turn, 5)
i4 = np.mod(i3+turn, 5)
for i5 in [5, 6]:
i6 = 5 if i5 == 6 else 6
equiv_list.append([i0, i1, i2, i3, i4, i5, i6])
if cg_symbol == 'HB:8':
for i0 in range(6):
for turn in [1, -1]:
i1 = np.mod(i0 + turn, 6)
i2 = np.mod(i1 + turn, 6)
i3 = np.mod(i2 + turn, 6)
i4 = np.mod(i3 + turn, 6)
i5 = np.mod(i4 + turn, 6)
for i6 in [6, 7]:
i7 = 6 if i6 == 7 else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
if cg_symbol == 'SBT:8':
for i0 in [0, 1, 3, 4]:
if i0 < 3:
i1 = 0 if i0 == 1 else 1
i2 = 2
else:
i1 = 3 if i0 == 4 else 4
i2 = 5
i3 = np.mod(i0 + 3, 6)
i4 = np.mod(i1 + 3, 6)
i5 = np.mod(i2 + 3, 6)
i6 = 7 if i0 in [1, 4] else 6
i7 = 6 if i0 in [1, 4] else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
if cg_symbol == 'SA:8':
sf1 = [0, 2, 1, 3]
sf2 = [4, 5, 7, 6]
for i0 in range(8):
if i0 in [0, 2]:
i1 = i0 + 1
elif i0 in [1, 3]:
i1 = i0 - 1
elif i0 == 4:
i1 = 7
elif i0 == 5:
i1 = 6
elif i0 == 6:
i1 = 5
elif i0 == 7:
i1 = 4
sfleft = list(sf1) if i0 in sf1 else list(sf2)
sfleft.remove(i0)
sfleft.remove(i1)
for i2 in sfleft:
sfleft2 = list(sfleft)
sfleft2.remove(i2)
i3 = sfleft2[0]
i4 = 0
i3 = np.mod(i0 + 3, 6)
i4 = np.mod(i1 + 3, 6)
i5 = np.mod(i2 + 3, 6)
i6 = 7 if i0 in [1, 4] else 6
i7 = 6 if i0 in [1, 4] else 7
equiv_list.append([i0, i1, i2, i3, i4, i5, i6, i7])
print('Equivalent indices ({:d}) for {} : '.format(len(equiv_list), cg_symbol))
print(equiv_list) | true | true |
f72fbcceb7f7342d732b521612c2db620aa6ae77 | 15,134 | py | Python | neutron/agent/l3/extensions/qos/fip.py | netsec/neutron | 17f90e17f139dc47eaafa1d3e342eb87ff0f61ed | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/extensions/qos/fip.py | netsec/neutron | 17f90e17f139dc47eaafa1d3e342eb87ff0f61ed | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/extensions/qos/fip.py | netsec/neutron | 17f90e17f139dc47eaafa1d3e342eb87ff0f61ed | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.agent import l3_extension
from neutron_lib import constants
from neutron_lib.services.qos import constants as qos_consts
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron.agent.l3.extensions.qos import base as qos_base
from neutron.agent.linux import ip_lib
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import coordination
LOG = logging.getLogger(__name__)
class RouterFipRateLimitMaps(qos_base.RateLimitMaps):
LOCK_NAME = "fip-qos-cache"
def __init__(self):
"""Initialize RouterFipRateLimitMaps
The router_floating_ips will be:
router_floating_ips = {
router_id_1: set(fip1, fip2),
router_id_2: set(), # default
}
"""
self.router_floating_ips = {}
"""
The rate limits dict will be:
xxx_ratelimits = {
fip_1: (rate, burst),
fip_2: (IP_DEFAULT_RATE, IP_DEFAULT_BURST), # default
fip_3: (1, 2),
fip_4: (3, 4),
}
"""
self.ingress_ratelimits = {}
self.egress_ratelimits = {}
super(RouterFipRateLimitMaps, self).__init__(self.LOCK_NAME)
def find_fip_router_id(self, fip):
@lockutils.synchronized(self.lock_name)
def _find_fip_router_id():
for router_id, ips in self.router_floating_ips.items():
if fip in ips:
return router_id
return _find_fip_router_id()
def get_router_floating_ips(self, router_id):
@lockutils.synchronized(self.lock_name)
def _get_router_floating_ips():
return self.router_floating_ips.pop(
router_id, [])
return _get_router_floating_ips()
def remove_fip_ratelimit_cache(self, direction, fip):
@lockutils.synchronized(self.lock_name)
def _remove_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate_limits.pop(fip, None)
_remove_fip_ratelimit_cache()
def set_fip_ratelimit_cache(self, direction, fip, rate, burst):
@lockutils.synchronized(self.lock_name)
def _set_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate_limits[fip] = (rate, burst)
_set_fip_ratelimit_cache()
def get_fip_ratelimit_cache(self, direction, fip):
@lockutils.synchronized(self.lock_name)
def _get_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate, burst = rate_limits.get(fip, (qos_base.IP_DEFAULT_RATE,
qos_base.IP_DEFAULT_BURST))
return rate, burst
return _get_fip_ratelimit_cache()
class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase,
l3_extension.L3AgentExtension):
def initialize(self, connection, driver_type):
"""Initialize agent extension."""
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.fip_qos_map = RouterFipRateLimitMaps()
self._register_rpc_consumers()
def _handle_notification(self, context, resource_type,
qos_policies, event_type):
if event_type == events.UPDATED:
for qos_policy in qos_policies:
self._process_update_policy(qos_policy)
def _process_update_policy(self, qos_policy):
old_qos_policy = self.fip_qos_map.get_policy(qos_policy.id)
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for fip in self.fip_qos_map.get_resources(qos_policy):
router_id = self.fip_qos_map.find_fip_router_id(fip)
router_info = self._get_router_info(router_id)
if not router_info:
continue
device = self._get_rate_limit_ip_device(router_info)
dvr_fip_device = self._get_dvr_fip_device(router_info)
if not device and not dvr_fip_device:
LOG.debug("Router %s does not have a floating IP "
"related device, skipping.", router_id)
continue
rates = self.get_policy_rates(qos_policy)
if device:
self.process_ip_rates(fip, device, rates)
if dvr_fip_device:
self.process_ip_rates(
fip, dvr_fip_device, rates, with_cache=False)
self.fip_qos_map.update_policy(qos_policy)
def _remove_fip_rate_limit_cache(self, fip):
for direction in constants.VALID_DIRECTIONS:
self.fip_qos_map.remove_fip_ratelimit_cache(direction, fip)
def _process_reset_fip(self, fip):
self.fip_qos_map.clean_by_resource(fip)
@coordination.synchronized('qos-floating-ip-{ip}')
def process_ip_rate_limit(self, ip, direction,
device, rate, burst):
tc_wrapper = self._get_tc_wrapper(device)
if (rate == qos_base.IP_DEFAULT_RATE and
burst == qos_base.IP_DEFAULT_BURST):
# According to the agreements of default value definition,
# floating IP bandwidth was changed to default value (no limit).
# NOTE: l3_tc_lib will ignore exception FilterIDForIPNotFound.
tc_wrapper.clear_ip_rate_limit(direction, ip)
self.fip_qos_map.remove_fip_ratelimit_cache(direction, ip)
return
# Finally just set it, l3_tc_lib will clean the old rules if exists.
tc_wrapper.set_ip_rate_limit(direction, ip, rate, burst)
def _get_rate_limit_ip_device(self, router_info):
ex_gw_port = router_info.get_ex_gw_port()
if not ex_gw_port:
return
agent_mode = router_info.agent_conf.agent_mode
is_distributed_router = router_info.router.get('distributed')
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_SNAT):
# DVR edge (or DVR edge ha) router
if not router_info._is_this_snat_host():
return
name = router_info.get_snat_external_device_interface_name(
ex_gw_port)
else:
# DVR local router
# Legacy/HA router
name = router_info.get_external_device_interface_name(ex_gw_port)
if not name:
# DVR local router in dvr_no_external agent mode may not have
# such rfp-device.
return
namespace = router_info.get_gw_ns_name()
return ip_lib.IPDevice(name, namespace=namespace)
def _remove_fip_rate_limit(self, device, fip_ip):
tc_wrapper = self._get_tc_wrapper(device)
for direction in constants.VALID_DIRECTIONS:
if device.exists():
tc_wrapper.clear_ip_rate_limit(direction, fip_ip)
self.fip_qos_map.remove_fip_ratelimit_cache(direction, fip_ip)
def get_fip_qos_rates(self, context, fip, policy_id):
if policy_id is None:
self._process_reset_fip(fip)
# process_ip_rate_limit will treat value 0 as
# cleaning the tc filters if exits or no action.
return {constants.INGRESS_DIRECTION: {
"rate": qos_base.IP_DEFAULT_RATE,
"burst": qos_base.IP_DEFAULT_BURST},
constants.EGRESS_DIRECTION: {
"rate": qos_base.IP_DEFAULT_RATE,
"burst": qos_base.IP_DEFAULT_BURST}}
policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, policy_id)
self.fip_qos_map.set_resource_policy(fip, policy)
return self.get_policy_rates(policy)
def process_ip_rates(self, fip, device, rates, with_cache=True):
for direction in constants.VALID_DIRECTIONS:
rate = rates.get(direction)
if with_cache:
old_rate, old_burst = self.fip_qos_map.get_fip_ratelimit_cache(
direction, fip)
if old_rate == rate['rate'] and old_burst == rate['burst']:
# Two possibilities here:
# 1. Floating IP rate limit does not change.
# 2. Floating IP bandwidth does not limit.
continue
self.process_ip_rate_limit(
fip, direction, device,
rate['rate'], rate['burst'])
self.fip_qos_map.set_fip_ratelimit_cache(
direction, fip, rate['rate'], rate['burst'])
else:
tc_wrapper = self._get_tc_wrapper(device)
if (rate['rate'] == qos_base.IP_DEFAULT_RATE and
rate['burst'] == qos_base.IP_DEFAULT_BURST):
# Default value is no limit
tc_wrapper.clear_ip_rate_limit(direction, fip)
else:
tc_wrapper.set_ip_rate_limit(direction, fip,
rate['rate'], rate['burst'])
def _get_dvr_fip_device(self, router_info):
is_distributed_router = router_info.router.get('distributed')
agent_mode = router_info.agent_conf.agent_mode
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_SNAT):
gw_port = router_info.get_ex_gw_port()
if gw_port and router_info.fip_ns:
rfp_dev_name = router_info.get_external_device_interface_name(
gw_port)
if router_info.router_namespace.exists() and rfp_dev_name:
return ip_lib.IPDevice(
rfp_dev_name, namespace=router_info.ns_name)
def process_floating_ip_addresses(self, context, router_info):
# Loop all the router floating ips, the corresponding floating IP tc
# rules will be configured:
# 1. for legacy and HA router, it will be all floating IPs to qg-device
# of qrouter-namespace in (all ha router hosted) network node.
# 2. for dvr router, we can do this simple. No matter the agent
# type is dvr or dvr_snat, we can just set all the
# floating IP tc rules to the corresponding device:
# 2.1 for dvr local router in compute node:
# the namespace is qrouter-x, and the device is rfp-device.
# 2.2 for dvr edge (ha) router in network node:
# the namespace is snat-x, and the device is qg-device.
# 3. for dvr local router, if agent_mod is dvr_no_external, no
# floating IP rules will be configured.
# 4. for dvr router in snat node, we should process the floating
# IP QoS again in qrouter-namespace to cover the mixed deployment
# with nova-compute scenario.
is_distributed_router = router_info.router.get('distributed')
agent_mode = router_info.agent_conf.agent_mode
LOG.debug("Start processing floating IP QoS for "
"router %(router_id)s, router "
"distributed: %(distributed)s, "
"agent mode: %(agent_mode)s",
{"router_id": router_info.router_id,
"distributed": is_distributed_router,
"agent_mode": agent_mode})
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_NO_EXTERNAL):
# condition 3: dvr local router and dvr_no_external agent
return
device = self._get_rate_limit_ip_device(router_info)
dvr_fip_device = self._get_dvr_fip_device(router_info)
if not device and not dvr_fip_device:
LOG.debug("No relevant QoS device found "
"for router: %s", router_info.router_id)
return
floating_ips = (router_info.get_floating_ips() +
router_info.get_port_forwarding_fips())
current_fips = self.fip_qos_map.router_floating_ips.get(
router_info.router_id, set())
new_fips = set()
for fip in floating_ips:
fip_addr = fip['floating_ip_address']
new_fips.add(fip_addr)
rates = self.get_fip_qos_rates(context,
fip_addr,
fip.get(qos_consts.QOS_POLICY_ID))
if device:
self.process_ip_rates(fip_addr, device, rates)
if dvr_fip_device:
# NOTE(liuyulong): for scenario 4 (mixed dvr_snat and compute
# node), because floating IP qos rates may have been
# processed in dvr snat-namespace, so here the cache was
# already set. We just install the rules to the device in
# qrouter-namesapce.
self.process_ip_rates(
fip_addr, dvr_fip_device, rates, with_cache=False)
self.fip_qos_map.router_floating_ips[router_info.router_id] = new_fips
fips_removed = current_fips - new_fips
for fip in fips_removed:
if device:
self._remove_fip_rate_limit(device, fip)
if dvr_fip_device:
self._remove_fip_rate_limit(dvr_fip_device, fip)
self._process_reset_fip(fip)
def add_router(self, context, data):
router_info = self._get_router_info(data['id'])
if router_info:
self.process_floating_ip_addresses(context, router_info)
def update_router(self, context, data):
router_info = self._get_router_info(data['id'])
if router_info:
self.process_floating_ip_addresses(context, router_info)
def delete_router(self, context, data):
# NOTE(liuyulong): to delete the router, you need to disassociate the
# floating IP first, so the update_router has done the cache clean.
pass
def ha_state_change(self, context, data):
pass
| 43.24 | 79 | 0.619466 |
from neutron_lib.agent import l3_extension
from neutron_lib import constants
from neutron_lib.services.qos import constants as qos_consts
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron.agent.l3.extensions.qos import base as qos_base
from neutron.agent.linux import ip_lib
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import coordination
LOG = logging.getLogger(__name__)
class RouterFipRateLimitMaps(qos_base.RateLimitMaps):
LOCK_NAME = "fip-qos-cache"
def __init__(self):
self.router_floating_ips = {}
self.ingress_ratelimits = {}
self.egress_ratelimits = {}
super(RouterFipRateLimitMaps, self).__init__(self.LOCK_NAME)
def find_fip_router_id(self, fip):
@lockutils.synchronized(self.lock_name)
def _find_fip_router_id():
for router_id, ips in self.router_floating_ips.items():
if fip in ips:
return router_id
return _find_fip_router_id()
def get_router_floating_ips(self, router_id):
@lockutils.synchronized(self.lock_name)
def _get_router_floating_ips():
return self.router_floating_ips.pop(
router_id, [])
return _get_router_floating_ips()
def remove_fip_ratelimit_cache(self, direction, fip):
@lockutils.synchronized(self.lock_name)
def _remove_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate_limits.pop(fip, None)
_remove_fip_ratelimit_cache()
def set_fip_ratelimit_cache(self, direction, fip, rate, burst):
@lockutils.synchronized(self.lock_name)
def _set_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate_limits[fip] = (rate, burst)
_set_fip_ratelimit_cache()
def get_fip_ratelimit_cache(self, direction, fip):
@lockutils.synchronized(self.lock_name)
def _get_fip_ratelimit_cache():
rate_limits_direction = direction + "_ratelimits"
rate_limits = getattr(self, rate_limits_direction, {})
rate, burst = rate_limits.get(fip, (qos_base.IP_DEFAULT_RATE,
qos_base.IP_DEFAULT_BURST))
return rate, burst
return _get_fip_ratelimit_cache()
class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase,
l3_extension.L3AgentExtension):
def initialize(self, connection, driver_type):
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self.fip_qos_map = RouterFipRateLimitMaps()
self._register_rpc_consumers()
def _handle_notification(self, context, resource_type,
qos_policies, event_type):
if event_type == events.UPDATED:
for qos_policy in qos_policies:
self._process_update_policy(qos_policy)
def _process_update_policy(self, qos_policy):
old_qos_policy = self.fip_qos_map.get_policy(qos_policy.id)
if old_qos_policy:
if self._policy_rules_modified(old_qos_policy, qos_policy):
for fip in self.fip_qos_map.get_resources(qos_policy):
router_id = self.fip_qos_map.find_fip_router_id(fip)
router_info = self._get_router_info(router_id)
if not router_info:
continue
device = self._get_rate_limit_ip_device(router_info)
dvr_fip_device = self._get_dvr_fip_device(router_info)
if not device and not dvr_fip_device:
LOG.debug("Router %s does not have a floating IP "
"related device, skipping.", router_id)
continue
rates = self.get_policy_rates(qos_policy)
if device:
self.process_ip_rates(fip, device, rates)
if dvr_fip_device:
self.process_ip_rates(
fip, dvr_fip_device, rates, with_cache=False)
self.fip_qos_map.update_policy(qos_policy)
def _remove_fip_rate_limit_cache(self, fip):
for direction in constants.VALID_DIRECTIONS:
self.fip_qos_map.remove_fip_ratelimit_cache(direction, fip)
def _process_reset_fip(self, fip):
self.fip_qos_map.clean_by_resource(fip)
@coordination.synchronized('qos-floating-ip-{ip}')
def process_ip_rate_limit(self, ip, direction,
device, rate, burst):
tc_wrapper = self._get_tc_wrapper(device)
if (rate == qos_base.IP_DEFAULT_RATE and
burst == qos_base.IP_DEFAULT_BURST):
tc_wrapper.clear_ip_rate_limit(direction, ip)
self.fip_qos_map.remove_fip_ratelimit_cache(direction, ip)
return
tc_wrapper.set_ip_rate_limit(direction, ip, rate, burst)
def _get_rate_limit_ip_device(self, router_info):
ex_gw_port = router_info.get_ex_gw_port()
if not ex_gw_port:
return
agent_mode = router_info.agent_conf.agent_mode
is_distributed_router = router_info.router.get('distributed')
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_SNAT):
if not router_info._is_this_snat_host():
return
name = router_info.get_snat_external_device_interface_name(
ex_gw_port)
else:
name = router_info.get_external_device_interface_name(ex_gw_port)
if not name:
return
namespace = router_info.get_gw_ns_name()
return ip_lib.IPDevice(name, namespace=namespace)
def _remove_fip_rate_limit(self, device, fip_ip):
tc_wrapper = self._get_tc_wrapper(device)
for direction in constants.VALID_DIRECTIONS:
if device.exists():
tc_wrapper.clear_ip_rate_limit(direction, fip_ip)
self.fip_qos_map.remove_fip_ratelimit_cache(direction, fip_ip)
def get_fip_qos_rates(self, context, fip, policy_id):
if policy_id is None:
self._process_reset_fip(fip)
return {constants.INGRESS_DIRECTION: {
"rate": qos_base.IP_DEFAULT_RATE,
"burst": qos_base.IP_DEFAULT_BURST},
constants.EGRESS_DIRECTION: {
"rate": qos_base.IP_DEFAULT_RATE,
"burst": qos_base.IP_DEFAULT_BURST}}
policy = self.resource_rpc.pull(
context, resources.QOS_POLICY, policy_id)
self.fip_qos_map.set_resource_policy(fip, policy)
return self.get_policy_rates(policy)
def process_ip_rates(self, fip, device, rates, with_cache=True):
for direction in constants.VALID_DIRECTIONS:
rate = rates.get(direction)
if with_cache:
old_rate, old_burst = self.fip_qos_map.get_fip_ratelimit_cache(
direction, fip)
if old_rate == rate['rate'] and old_burst == rate['burst']:
continue
self.process_ip_rate_limit(
fip, direction, device,
rate['rate'], rate['burst'])
self.fip_qos_map.set_fip_ratelimit_cache(
direction, fip, rate['rate'], rate['burst'])
else:
tc_wrapper = self._get_tc_wrapper(device)
if (rate['rate'] == qos_base.IP_DEFAULT_RATE and
rate['burst'] == qos_base.IP_DEFAULT_BURST):
tc_wrapper.clear_ip_rate_limit(direction, fip)
else:
tc_wrapper.set_ip_rate_limit(direction, fip,
rate['rate'], rate['burst'])
def _get_dvr_fip_device(self, router_info):
is_distributed_router = router_info.router.get('distributed')
agent_mode = router_info.agent_conf.agent_mode
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_SNAT):
gw_port = router_info.get_ex_gw_port()
if gw_port and router_info.fip_ns:
rfp_dev_name = router_info.get_external_device_interface_name(
gw_port)
if router_info.router_namespace.exists() and rfp_dev_name:
return ip_lib.IPDevice(
rfp_dev_name, namespace=router_info.ns_name)
def process_floating_ip_addresses(self, context, router_info):
is_distributed_router = router_info.router.get('distributed')
agent_mode = router_info.agent_conf.agent_mode
LOG.debug("Start processing floating IP QoS for "
"router %(router_id)s, router "
"distributed: %(distributed)s, "
"agent mode: %(agent_mode)s",
{"router_id": router_info.router_id,
"distributed": is_distributed_router,
"agent_mode": agent_mode})
if is_distributed_router and agent_mode == (
constants.L3_AGENT_MODE_DVR_NO_EXTERNAL):
return
device = self._get_rate_limit_ip_device(router_info)
dvr_fip_device = self._get_dvr_fip_device(router_info)
if not device and not dvr_fip_device:
LOG.debug("No relevant QoS device found "
"for router: %s", router_info.router_id)
return
floating_ips = (router_info.get_floating_ips() +
router_info.get_port_forwarding_fips())
current_fips = self.fip_qos_map.router_floating_ips.get(
router_info.router_id, set())
new_fips = set()
for fip in floating_ips:
fip_addr = fip['floating_ip_address']
new_fips.add(fip_addr)
rates = self.get_fip_qos_rates(context,
fip_addr,
fip.get(qos_consts.QOS_POLICY_ID))
if device:
self.process_ip_rates(fip_addr, device, rates)
if dvr_fip_device:
self.process_ip_rates(
fip_addr, dvr_fip_device, rates, with_cache=False)
self.fip_qos_map.router_floating_ips[router_info.router_id] = new_fips
fips_removed = current_fips - new_fips
for fip in fips_removed:
if device:
self._remove_fip_rate_limit(device, fip)
if dvr_fip_device:
self._remove_fip_rate_limit(dvr_fip_device, fip)
self._process_reset_fip(fip)
def add_router(self, context, data):
router_info = self._get_router_info(data['id'])
if router_info:
self.process_floating_ip_addresses(context, router_info)
def update_router(self, context, data):
router_info = self._get_router_info(data['id'])
if router_info:
self.process_floating_ip_addresses(context, router_info)
def delete_router(self, context, data):
pass
def ha_state_change(self, context, data):
pass
| true | true |
f72fbced7d7530fbca0bf7e3bb8adb613862ba38 | 183 | py | Python | Euler/Problem_16.py | ChristensenCode/energy-viking | 7a720cbcfabcb020ed42d52462bfad4058b0c20f | [
"MIT"
] | null | null | null | Euler/Problem_16.py | ChristensenCode/energy-viking | 7a720cbcfabcb020ed42d52462bfad4058b0c20f | [
"MIT"
] | null | null | null | Euler/Problem_16.py | ChristensenCode/energy-viking | 7a720cbcfabcb020ed42d52462bfad4058b0c20f | [
"MIT"
] | null | null | null | # Problem 16 Power Digit Sum
x = 2**1000
print(x)
value = str(2**1000)
totalling = []
for i in range(len(value)):
total = int(value[i])
totalling.append(total)
print(sum(totalling)) | 20.333333 | 28 | 0.688525 |
x = 2**1000
print(x)
value = str(2**1000)
totalling = []
for i in range(len(value)):
total = int(value[i])
totalling.append(total)
print(sum(totalling)) | true | true |
f72fbf0efa739283e1861ea301b93db9d409887a | 1,174 | py | Python | neutron/db/migration/alembic_migrations/versions/1f71e54a85e7_ml2_net_seg_model.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 10 | 2015-09-22T10:22:53.000Z | 2016-02-25T06:12:05.000Z | neutron/db/migration/alembic_migrations/versions/1f71e54a85e7_ml2_net_seg_model.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 12 | 2015-01-08T18:30:45.000Z | 2015-03-13T21:04:15.000Z | neutron/db/migration/alembic_migrations/versions/1f71e54a85e7_ml2_net_seg_model.py | gampel/neutron | 51a6260266dc59c066072ca890ad9c40b1aad6cf | [
"Apache-2.0"
] | 7 | 2015-02-05T10:23:52.000Z | 2019-05-18T17:11:19.000Z | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""ml2_network_segments models change for multi-segment network.
Revision ID: 1f71e54a85e7
Revises: 44621190bc02
Create Date: 2014-10-15 18:30:51.395295
"""
# revision identifiers, used by Alembic.
revision = '1f71e54a85e7'
down_revision = '44621190bc02'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('ml2_network_segments',
sa.Column('segment_index', sa.Integer(), nullable=False,
server_default='0'))
def downgrade():
op.drop_column('ml2_network_segments', 'segment_index')
| 28.634146 | 78 | 0.721465 |
revision = '1f71e54a85e7'
down_revision = '44621190bc02'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('ml2_network_segments',
sa.Column('segment_index', sa.Integer(), nullable=False,
server_default='0'))
def downgrade():
op.drop_column('ml2_network_segments', 'segment_index')
| true | true |
f72fbf450177a71f28c55fbc60c587342b06a61a | 26,495 | py | Python | scikits/crab/recommenders/knn/classes.py | MostaSchoolOfAI/crab | 1c1fc21e902e4ee422ab367d691df16978972f8c | [
"BSD-3-Clause"
] | null | null | null | scikits/crab/recommenders/knn/classes.py | MostaSchoolOfAI/crab | 1c1fc21e902e4ee422ab367d691df16978972f8c | [
"BSD-3-Clause"
] | null | null | null | scikits/crab/recommenders/knn/classes.py | MostaSchoolOfAI/crab | 1c1fc21e902e4ee422ab367d691df16978972f8c | [
"BSD-3-Clause"
] | null | null | null | """
Generalized Recommender models.
This module contains basic memory recommender interfaces used throughout
the whole scikit-crab package.
The interfaces are realized as abstract base classes (ie., some optional
functionality is provided in the interface itself, so that the interfaces
can be subclassed).
"""
# Author: Marcel Caraciolo <marcel@muricoca.com>
#
# License: BSD Style.
from sklearn.base import BaseEstimator
from .base import ItemRecommender, UserRecommender
from .item_strategies import ItemsNeighborhoodStrategy
from .neighborhood_strategies import NearestNeighborsStrategy
import numpy as np
class ItemBasedRecommender(ItemRecommender):
    """
    Item Based Collaborative Filtering Recommender.

    Parameters
    -----------
    data_model: The data model instance that will be data source
         for the recommender.

    similarity: The Item Similarity instance that will be used to
        score the items that will be recommended.

    items_selection_strategy: The item candidates strategy that you
        can choose for selecting the possible items to recommend.
        default = ItemsNeighborhoodStrategy

    capper: bool (default=True)
        Cap the preferences with maximum and minimum preferences
        in the model.

    with_preference: bool (default=False)
        Return the recommendations with the estimated preferences if True.

    Attributes
    -----------
    `model`: The data model instance that will be data source
         for the recommender.

    `similarity`: The Item Similarity instance that will be used to
        score the items that will be recommended.

    `items_selection_strategy`: The item candidates strategy that you
        can choose for selecting the possible items to recommend.
        default = ItemsNeighborhoodStrategy

    `capper`: bool (default=True)
        Cap the preferences with maximum and minimum preferences
        in the model.

    `with_preference`: bool (default=False)
        Return the recommendations with the estimated preferences if True.

    Examples
    -----------
    >>> from scikits.crab.models.classes import MatrixPreferenceDataModel
    >>> from scikits.crab.recommenders.knn.classes import ItemBasedRecommender
    >>> from scikits.crab.similarities.basic_similarities import ItemSimilarity
    >>> from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy
    >>> from scikits.crab.metrics.pairwise import euclidean_distances
    >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \
     'Snakes on a Plane': 3.5, \
    'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \
     'The Night Listener': 3.0}, \
    'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \
     'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \
     'You, Me and Dupree': 3.5}, \
    'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \
     'Superman Returns': 3.5, 'The Night Listener': 4.0}, \
    'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \
     'The Night Listener': 4.5, 'Superman Returns': 4.0, \
     'You, Me and Dupree': 2.5}, \
    'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
     'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \
     'You, Me and Dupree': 2.0}, \
    'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
     'The Night Listener': 3.0, 'Superman Returns': 5.0, \
     'You, Me and Dupree': 3.5}, \
    'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \
     'Superman Returns':4.0}, \
    'Maria Gabriela': {}}
    >>> model = MatrixPreferenceDataModel(movies)
    >>> items_strategy = ItemsNeighborhoodStrategy()
    >>> similarity = ItemSimilarity(model, euclidean_distances)
    >>> recsys = ItemBasedRecommender(model, similarity, items_strategy)
    >>> #Return the recommendations for the given user.
    >>> recsys.recommend('Leopoldo Pires')
    ['Just My Luck', 'You, Me and Dupree']
    >>> #Return the 2 explanations for the given recommendation.
    >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)
    ['The Night Listener', 'Superman Returns']

    Notes
    -----------
    This ItemBasedRecommender does not yet provide
    support for rescorer functions.

    References
    -----------
    Item-based collaborative filtering recommendation algorithms by Sarwar
    http://portal.acm.org/citation.cfm?id=372071

    """

    def __init__(self, model, similarity, items_selection_strategy=None,
                capper=True, with_preference=False):
        ItemRecommender.__init__(self, model, with_preference)
        self.similarity = similarity
        self.capper = capper
        if items_selection_strategy is None:
            self.items_selection_strategy = ItemsNeighborhoodStrategy()
        else:
            self.items_selection_strategy = items_selection_strategy

    def recommend(self, user_id, how_many=None, **params):
        '''
        Return a list of recommended items, ordered from most strongly
        recommend to least.

        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.
        how_many: int
                 Desired number of recommendations (default=None ALL)
        '''
        self._set_params(**params)
        candidate_items = self.all_other_items(user_id)
        return self._top_matches(user_id, candidate_items, how_many)

    def estimate_preference(self, user_id, item_id, **params):
        '''
        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.
        item_id: int or string
                ID of item for which wants to find the estimated preference.

        Returns
        -------
        Return an estimated preference if the user has not expressed a
        preference for the item, or else the user's actual preference for the
        item. If a preference cannot be estimated, returns np.nan.
        '''
        preference = self.model.preference_value(user_id, item_id)
        if not np.isnan(preference):
            # The user already rated the item: return the actual preference.
            return preference

        #TODO: It needs optimization
        prefs = self.model.preferences_from_user(user_id)

        if not self.model.has_preference_values():
            # Boolean model: treat every expressed preference as weight 1.0.
            prefs = [(pref, 1.0) for pref in prefs]

        # Similarity of item_id to every other item the user has rated.
        # item_id itself cannot appear in prefs here (handled above), so
        # similarities and prefs stay index-aligned.
        similarities = \
            np.array([self.similarity.get_similarity(item_id, to_item_id) \
            for to_item_id, pref in prefs if to_item_id != item_id]).flatten()

        prefs = np.array([pref for it, pref in prefs])
        prefs_sim = np.sum(prefs[~np.isnan(similarities)] *
                             similarities[~np.isnan(similarities)])
        total_similarity = np.sum(similarities)

        # Throw out the estimate if it was based on no data points, or when no
        # similarity is defined at all: then the estimate would simply echo
        # the user's rating for one item that happened to have a similarity,
        # which is a bad situation.
        if total_similarity == 0.0 or \
           not similarities[~np.isnan(similarities)].size:
            return np.nan

        estimated = prefs_sim / total_similarity
        if self.capper:
            # Clamp the estimate into the model's preference range.
            max_p = self.model.maximum_preference_value()
            min_p = self.model.minimum_preference_value()
            estimated = max_p if estimated > max_p else min_p \
                     if estimated < min_p else estimated
        return estimated

    def all_other_items(self, user_id, **params):
        '''
        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.

        Returns
        ---------
        Return items in the `model` for which the user has not expressed
        the preference and could possibly be recommended to the user.
        '''
        return self.items_selection_strategy.candidate_items(user_id, \
                            self.model)

    def _top_matches(self, source_id, target_ids, how_many=None, **params):
        '''
        Parameters
        ----------
        target_ids: array of shape [n_target_ids]
        source_id: int or string
                item id to compare against.
        how_many: int
            Desired number of most top items to recommend (default=None ALL)

        Returns
        --------
        Return the top N matches. It can be user_ids or item_ids.
        '''
        #Empty target_ids
        if target_ids.size == 0:
            return np.array([])

        estimate_preferences = np.vectorize(self.estimate_preference)
        preferences = estimate_preferences(source_id, target_ids)

        # Apply the same NaN mask to values and ids so their positions stay
        # aligned with the indices produced by the sort below.
        valid = ~np.isnan(preferences)
        preference_values = preferences[valid]
        target_ids = target_ids[valid]

        sorted_preferences = np.lexsort((preference_values,))[::-1]
        sorted_preferences = sorted_preferences[0:how_many] \
             if how_many and sorted_preferences.size > how_many \
                else sorted_preferences

        if self.with_preference:
            # BUGFIX: index the filtered preference_values, not the unfiltered
            # preferences array (sort indices refer to the filtered arrays).
            top_n_recs = [(target_ids[ind], preference_values[ind])
                          for ind in sorted_preferences]
        else:
            top_n_recs = [target_ids[ind] for ind in sorted_preferences]

        return top_n_recs

    def most_similar_items(self, item_id, how_many=None):
        '''
        Return the most similar items to the given item, ordered
        from most similar to least.

        Parameters
        -----------
        item_id:  int or string
            ID of item for which to find most similar other items
        how_many: int
            Desired number of most similar items to find (default=None ALL)
        '''
        old_how_many = self.similarity.num_best
        #+1 since it returns the identity.
        self.similarity.num_best = how_many + 1 \
                    if how_many is not None else None
        similarities = self.similarity[item_id]
        self.similarity.num_best = old_how_many
        # Drop the item itself and anything without a defined similarity.
        return np.array([item for item, pref in similarities \
            if item != item_id and not np.isnan(pref)])

    def recommended_because(self, user_id, item_id, how_many=None, **params):
        '''
        Returns the items that were most influential in recommending a
        given item to a given user. In most implementations, this
        method will return items that the user prefers and that
        are similar to the given item.

        Parameters
        -----------
        user_id : int or string
            ID of the user who was recommended the item

        item_id: int or string
            ID of item that was recommended

        how_many: int
            Maximum number of items to return (default=None ALL)

        Returns
        ----------
        The list of items ordered from most influential in
        recommended the given item to least
        '''
        preferences = self.model.preferences_from_user(user_id)

        # Exclude item_id itself in *all* parallel arrays so ids, preference
        # values and similarities stay index-aligned (BUGFIX: the ids and
        # values were previously built from the unfiltered preferences).
        if self.model.has_preference_values():
            neighbors = [(to_item_id, pref) for to_item_id, pref in preferences
                         if to_item_id != item_id]
            similarities = \
                np.array([self.similarity.get_similarity(item_id, to_item_id) \
                    for to_item_id, pref in neighbors]).flatten()
            prefs = np.array([pref for it, pref in neighbors])
            item_ids = np.array([it for it, pref in neighbors])
        else:
            neighbors = [to_item_id for to_item_id in preferences
                         if to_item_id != item_id]
            similarities = \
                np.array([self.similarity.get_similarity(item_id, to_item_id) \
                    for to_item_id in neighbors]).flatten()
            prefs = np.ones(len(neighbors))
            item_ids = np.array(neighbors)

        # Mask out undefined similarities consistently across every array.
        valid = ~np.isnan(similarities)
        prefs = prefs[valid]
        item_ids = item_ids[valid]
        scores = prefs * (1.0 + similarities[valid])

        sorted_preferences = np.lexsort((scores,))[::-1]
        sorted_preferences = sorted_preferences[0:how_many] \
            if how_many and sorted_preferences.size > how_many \
                else sorted_preferences
        if self.with_preference:
            top_n_recs = [(item_ids[ind], prefs[ind])
                          for ind in sorted_preferences]
        else:
            top_n_recs = [item_ids[ind] for ind in sorted_preferences]

        return top_n_recs
#=====================
#User Based Recommender
class UserBasedRecommender(UserRecommender):
    """
    User Based Collaborative Filtering Recommender.

    Parameters
    -----------
    data_model: The data model instance that will be data source
         for the recommender.

    similarity: The User Similarity instance that will be used to
        score the users that are the most similar to the user.

    neighborhood_strategy: The user neighborhood strategy that you
        can choose for selecting the most similar users to find
        the items to recommend.
        default = NearestNeighborsStrategy

    capper: bool (default=True)
        Cap the preferences with maximum and minimum preferences
        in the model.

    with_preference: bool (default=False)
        Return the recommendations with the estimated preferences if True.

    Attributes
    -----------
    `model`: The data model instance that will be data source
         for the recommender.

    `similarity`: The User Similarity instance that will be used to
        score the users that are the most similar to the user.

    `neighborhood_strategy`: The user neighborhood strategy that you
        can choose for selecting the most similar users to find
        the items to recommend.
        default = NearestNeighborsStrategy

    `capper`: bool (default=True)
        Cap the preferences with maximum and minimum preferences
        in the model.

    `with_preference`: bool (default=False)
        Return the recommendations with the estimated preferences if True.

    Examples
    -----------
    >>> from scikits.crab.models.classes import MatrixPreferenceDataModel
    >>> from scikits.crab.recommenders.knn.classes import UserBasedRecommender
    >>> from scikits.crab.similarities.basic_similarities import UserSimilarity
    >>> from scikits.crab.recommenders.knn.neighborhood_strategies import NearestNeighborsStrategy
    >>> from scikits.crab.metrics.pairwise import euclidean_distances
    >>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \
     'Snakes on a Plane': 3.5, \
    'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \
     'The Night Listener': 3.0}, \
    'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \
     'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \
     'You, Me and Dupree': 3.5}, \
    'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \
     'Superman Returns': 3.5, 'The Night Listener': 4.0}, \
    'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \
     'The Night Listener': 4.5, 'Superman Returns': 4.0, \
     'You, Me and Dupree': 2.5}, \
    'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
     'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \
     'You, Me and Dupree': 2.0}, \
    'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
     'The Night Listener': 3.0, 'Superman Returns': 5.0, \
     'You, Me and Dupree': 3.5}, \
    'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \
     'Superman Returns':4.0}, \
    'Maria Gabriela': {}}
    >>> model = MatrixPreferenceDataModel(movies)
    >>> nhood_strategy = NearestNeighborsStrategy()
    >>> similarity = UserSimilarity(model, euclidean_distances)
    >>> recsys = UserBasedRecommender(model, similarity, nhood_strategy)
    >>> #Return the recommendations for the given user.
    >>> recsys.recommend('Leopoldo Pires')
    ['Just My Luck', 'You, Me and Dupree']
    >>> #Return the 2 explanations for the given recommendation.
    >>> recsys.recommended_because('Leopoldo Pires', 'Just My Luck',2)
    ['Lorena Abreu', 'Marcel Caraciolo']

    Notes
    -----------
    This UserBasedRecommender does not yet provide
    support for rescorer functions.

    References
    -----------
    User-based collaborative filtering recommendation algorithms by

    """

    def __init__(self, model, similarity, neighborhood_strategy=None,
                capper=True, with_preference=False):
        UserRecommender.__init__(self, model, with_preference)
        self.similarity = similarity
        self.capper = capper
        if neighborhood_strategy is None:
            self.neighborhood_strategy = NearestNeighborsStrategy()
        else:
            self.neighborhood_strategy = neighborhood_strategy

    def all_other_items(self, user_id, **params):
        '''
        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.

        Optional Parameters
        --------------------
        n_similarity: string
            The similarity used in the neighborhood strategy
            (default= 'user_similarity')
        distance: the metrics.pairwise function to set.
                The pairwise function to compute the similarity
                (default = euclidean_distances)
        nhood_size: int
            The neighborhood size (default=None ALL)
        minimal_similarity: float
            minimal similarity required for neighbors (default = 0.0)
        sampling_rate: int
            percentage of users to consider when building neighborhood
            (default = 1)

        Returns
        ---------
        Return items in the `model` for which the user has not expressed
        the preference and could possibly be recommended to the user.
        '''
        n_similarity = params.pop('n_similarity', 'user_similarity')
        distance = params.pop('distance', self.similarity.distance)
        nhood_size = params.pop('nhood_size', None)

        nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,
                self.model, n_similarity, distance, nhood_size, **params)

        # Candidate items = union of the neighbors' items, minus the items
        # the user has already rated.
        items_from_user_id = self.model.items_from_user(user_id)
        possible_items = []
        for to_user_id in nearest_neighbors:
            possible_items.extend(self.model.items_from_user(to_user_id))

        possible_items = np.unique(np.array(possible_items).flatten())

        return np.setdiff1d(possible_items, items_from_user_id)

    def estimate_preference(self, user_id, item_id, **params):
        '''
        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.
        item_id: int or string
                ID of item for which wants to find the estimated preference.

        Returns
        -------
        Return an estimated preference if the user has not expressed a
        preference for the item, or else the user's actual preference for the
        item. If a preference cannot be estimated, returns np.nan.
        '''
        preference = self.model.preference_value(user_id, item_id)
        if not np.isnan(preference):
            # The user already rated the item: return the actual preference.
            return preference

        n_similarity = params.pop('n_similarity', 'user_similarity')
        distance = params.pop('distance', self.similarity.distance)
        nhood_size = params.pop('nhood_size', None)

        nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,
                self.model, n_similarity, distance, nhood_size, **params)

        similarities = np.array([self.similarity.get_similarity(user_id, to_user_id)
            for to_user_id in nearest_neighbors]).flatten()

        prefs = np.array([self.model.preference_value(to_user_id, item_id)
                 for to_user_id in nearest_neighbors])

        # BUGFIX: neighbors that never rated item_id yield NaN preferences;
        # mask them out of the weighted sum instead of letting NaN propagate
        # and silently invalidate the whole estimate.
        valid = ~np.isnan(similarities) & ~np.isnan(prefs)
        prefs_sim = np.sum(prefs[valid] * similarities[valid])
        total_similarity = np.sum(similarities)

        # Throw out the estimate when no similarity is defined at all:
        # otherwise the estimate would simply echo one neighbor's rating
        # that happened to have a defined similarity.
        if total_similarity == 0.0 or \
           not similarities[~np.isnan(similarities)].size:
            return np.nan

        estimated = prefs_sim / total_similarity

        if self.capper:
            # Clamp the estimate into the model's preference range.
            max_p = self.model.maximum_preference_value()
            min_p = self.model.minimum_preference_value()
            estimated = max_p if estimated > max_p else min_p \
                     if estimated < min_p else estimated
        return estimated

    def most_similar_users(self, user_id, how_many=None):
        '''
        Return the most similar users to the given user, ordered
        from most similar to least.

        Parameters
        -----------
        user_id:  int or string
            ID of user for which to find most similar other users
        how_many: int
            Desired number of most similar users to find (default=None ALL)
        '''
        old_how_many = self.similarity.num_best
        #+1 since it returns the identity.
        self.similarity.num_best = how_many + 1 \
                    if how_many is not None else None
        similarities = self.similarity[user_id]
        self.similarity.num_best = old_how_many
        # Drop the user itself and anything without a defined similarity.
        return np.array([to_user_id for to_user_id, pref in similarities \
            if user_id != to_user_id and not np.isnan(pref)])

    def recommend(self, user_id, how_many=None, **params):
        '''
        Return a list of recommended items, ordered from most strongly
        recommend to least.

        Parameters
        ----------
        user_id: int or string
                 User for which recommendations are to be computed.
        how_many: int
                 Desired number of recommendations (default=None ALL)
        '''
        self.set_params(**params)
        candidate_items = self.all_other_items(user_id, **params)
        return self._top_matches(user_id, candidate_items, how_many)

    def _top_matches(self, source_id, target_ids, how_many=None, **params):
        '''
        Parameters
        ----------
        target_ids: array of shape [n_target_ids]
        source_id: int or string
                item id to compare against.
        how_many: int
            Desired number of most top items to recommend (default=None ALL)

        Returns
        --------
        Return the top N matches. It can be user_ids or item_ids.
        '''
        #Empty target_ids
        if target_ids.size == 0:
            return np.array([])

        estimate_preferences = np.vectorize(self.estimate_preference)
        preferences = estimate_preferences(source_id, target_ids)

        # Apply the same NaN mask to values and ids so their positions stay
        # aligned with the indices produced by the sort below.
        valid = ~np.isnan(preferences)
        preference_values = preferences[valid]
        target_ids = target_ids[valid]

        sorted_preferences = np.lexsort((preference_values,))[::-1]
        sorted_preferences = sorted_preferences[0:how_many] \
             if how_many and sorted_preferences.size > how_many \
                else sorted_preferences

        if self.with_preference:
            # BUGFIX: index the filtered preference_values, not the unfiltered
            # preferences array (sort indices refer to the filtered arrays).
            top_n_recs = [(target_ids[ind], preference_values[ind])
                          for ind in sorted_preferences]
        else:
            top_n_recs = [target_ids[ind] for ind in sorted_preferences]

        return top_n_recs

    def recommended_because(self, user_id, item_id, how_many=None, **params):
        '''
        Returns the users that were most influential in recommending a
        given item to a given user. In most implementations, this
        method will return users that prefers the recommended item and that
        are similar to the given user.

        Parameters
        -----------
        user_id : int or string
            ID of the user who was recommended the item

        item_id: int or string
            ID of item that was recommended

        how_many: int
            Maximum number of items to return (default=None ALL)

        Returns
        ----------
        The list of items ordered from most influential in
        recommended the given item to least
        '''
        preferences = self.model.preferences_for_item(item_id)

        # Exclude the target user in *all* parallel arrays so ids, preference
        # values and similarities stay index-aligned (BUGFIX: the ids and
        # values were previously built from the unfiltered preferences).
        if self.model.has_preference_values():
            neighbors = [(to_user_id, pref) for to_user_id, pref in preferences
                         if to_user_id != user_id]
            similarities = \
                np.array([self.similarity.get_similarity(user_id, to_user_id) \
                    for to_user_id, pref in neighbors]).flatten()
            prefs = np.array([pref for usr, pref in neighbors])
            user_ids = np.array([usr for usr, pref in neighbors])
        else:
            neighbors = [to_user_id for to_user_id in preferences
                         if to_user_id != user_id]
            similarities = \
                np.array([self.similarity.get_similarity(user_id, to_user_id) \
                    for to_user_id in neighbors]).flatten()
            prefs = np.ones(len(neighbors))
            user_ids = np.array(neighbors)

        # Mask out undefined similarities consistently across every array.
        valid = ~np.isnan(similarities)
        prefs = prefs[valid]
        user_ids = user_ids[valid]
        scores = prefs * (1.0 + similarities[valid])

        sorted_preferences = np.lexsort((scores,))[::-1]
        sorted_preferences = sorted_preferences[0:how_many] \
            if how_many and sorted_preferences.size > how_many \
                else sorted_preferences
        if self.with_preference:
            top_n_recs = [(user_ids[ind], prefs[ind])
                          for ind in sorted_preferences]
        else:
            top_n_recs = [user_ids[ind] for ind in sorted_preferences]

        return top_n_recs
| 37.316901 | 98 | 0.627062 |
from sklearn.base import BaseEstimator
from .base import ItemRecommender, UserRecommender
from .item_strategies import ItemsNeighborhoodStrategy
from .neighborhood_strategies import NearestNeighborsStrategy
import numpy as np
class ItemBasedRecommender(ItemRecommender):
def __init__(self, model, similarity, items_selection_strategy=None,
capper=True, with_preference=False):
ItemRecommender.__init__(self, model, with_preference)
self.similarity = similarity
self.capper = capper
if items_selection_strategy is None:
self.items_selection_strategy = ItemsNeighborhoodStrategy()
else:
self.items_selection_strategy = items_selection_strategy
def recommend(self, user_id, how_many=None, **params):
self._set_params(**params)
candidate_items = self.all_other_items(user_id)
recommendable_items = self._top_matches(user_id, \
candidate_items, how_many)
return recommendable_items
def estimate_preference(self, user_id, item_id, **params):
preference = self.model.preference_value(user_id, item_id)
if not np.isnan(preference):
return preference
prefs = self.model.preferences_from_user(user_id)
if not self.model.has_preference_values():
prefs = [(pref, 1.0) for pref in prefs]
similarities = \
np.array([self.similarity.get_similarity(item_id, to_item_id) \
for to_item_id, pref in prefs if to_item_id != item_id]).flatten()
prefs = np.array([pref for it, pref in prefs])
prefs_sim = np.sum(prefs[~np.isnan(similarities)] *
similarities[~np.isnan(similarities)])
total_similarity = np.sum(similarities)
#that happened to have a defined similarity.
#The similarity score doesn't matter, and that
if total_similarity == 0.0 or \
not similarities[~np.isnan(similarities)].size:
return np.nan
estimated = prefs_sim / total_similarity
if self.capper:
max_p = self.model.maximum_preference_value()
min_p = self.model.minimum_preference_value()
estimated = max_p if estimated > max_p else min_p \
if estimated < min_p else estimated
return estimated
def all_other_items(self, user_id, **params):
return self.items_selection_strategy.candidate_items(user_id, \
self.model)
def _top_matches(self, source_id, target_ids, how_many=None, **params):
if target_ids.size == 0:
return np.array([])
estimate_preferences = np.vectorize(self.estimate_preference)
preferences = estimate_preferences(source_id, target_ids)
preference_values = preferences[~np.isnan(preferences)]
target_ids = target_ids[~np.isnan(preferences)]
sorted_preferences = np.lexsort((preference_values,))[::-1]
sorted_preferences = sorted_preferences[0:how_many] \
if how_many and sorted_preferences.size > how_many \
else sorted_preferences
if self.with_preference:
top_n_recs = [(target_ids[ind], \
preferences[ind]) for ind in sorted_preferences]
else:
top_n_recs = [target_ids[ind]
for ind in sorted_preferences]
return top_n_recs
def most_similar_items(self, item_id, how_many=None):
old_how_many = self.similarity.num_best
self.similarity.num_best = how_many + 1 \
if how_many is not None else None
similarities = self.similarity[item_id]
self.similarity.num_best = old_how_many
return np.array([item for item, pref in similarities \
if item != item_id and not np.isnan(pref)])
def recommended_because(self, user_id, item_id, how_many=None, **params):
preferences = self.model.preferences_from_user(user_id)
if self.model.has_preference_values():
similarities = \
np.array([self.similarity.get_similarity(item_id, to_item_id) \
for to_item_id, pref in preferences
if to_item_id != item_id]).flatten()
prefs = np.array([pref for it, pref in preferences])
item_ids = np.array([it for it, pref in preferences])
else:
similarities = \
np.array([self.similarity.get_similarity(item_id, to_item_id) \
for to_item_id in preferences
if to_item_id != item_id]).flatten()
prefs = np.array([1.0 for it in preferences])
item_ids = np.array(preferences)
scores = prefs[~np.isnan(similarities)] * \
(1.0 + similarities[~np.isnan(similarities)])
sorted_preferences = np.lexsort((scores,))[::-1]
sorted_preferences = sorted_preferences[0:how_many] \
if how_many and sorted_preferences.size > how_many \
else sorted_preferences
if self.with_preference:
top_n_recs = [(item_ids[ind], \
prefs[ind]) for ind in sorted_preferences]
else:
top_n_recs = [item_ids[ind]
for ind in sorted_preferences]
return top_n_recs
class UserBasedRecommender(UserRecommender):
def __init__(self, model, similarity, neighborhood_strategy=None,
capper=True, with_preference=False):
UserRecommender.__init__(self, model, with_preference)
self.similarity = similarity
self.capper = capper
if neighborhood_strategy is None:
self.neighborhood_strategy = NearestNeighborsStrategy()
else:
self.neighborhood_strategy = neighborhood_strategy
def all_other_items(self, user_id, **params):
n_similarity = params.pop('n_similarity', 'user_similarity')
distance = params.pop('distance', self.similarity.distance)
nhood_size = params.pop('nhood_size', None)
nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,
self.model, n_similarity, distance, nhood_size, **params)
items_from_user_id = self.model.items_from_user(user_id)
possible_items = []
for to_user_id in nearest_neighbors:
possible_items.extend(self.model.items_from_user(to_user_id))
possible_items = np.unique(np.array(possible_items).flatten())
return np.setdiff1d(possible_items, items_from_user_id)
def estimate_preference(self, user_id, item_id, **params):
preference = self.model.preference_value(user_id, item_id)
if not np.isnan(preference):
return preference
n_similarity = params.pop('n_similarity', 'user_similarity')
distance = params.pop('distance', self.similarity.distance)
nhood_size = params.pop('nhood_size', None)
nearest_neighbors = self.neighborhood_strategy.user_neighborhood(user_id,
self.model, n_similarity, distance, nhood_size, **params)
preference = 0.0
total_similarity = 0.0
similarities = np.array([self.similarity.get_similarity(user_id, to_user_id)
for to_user_id in nearest_neighbors]).flatten()
prefs = np.array([self.model.preference_value(to_user_id, item_id)
for to_user_id in nearest_neighbors])
prefs_sim = np.sum(prefs[~np.isnan(similarities)] *
similarities[~np.isnan(similarities)])
total_similarity = np.sum(similarities)
#to have a defined similarity. The similarity score doesn't
if total_similarity == 0.0 or \
not similarities[~np.isnan(similarities)].size:
return np.nan
estimated = prefs_sim / total_similarity
if self.capper:
max_p = self.model.maximum_preference_value()
min_p = self.model.minimum_preference_value()
estimated = max_p if estimated > max_p else min_p \
if estimated < min_p else estimated
return estimated
def most_similar_users(self, user_id, how_many=None):
old_how_many = self.similarity.num_best
self.similarity.num_best = how_many + 1 \
if how_many is not None else None
similarities = self.similarity[user_id]
self.similarity.num_best = old_how_many
return np.array([to_user_id for to_user_id, pref in similarities \
if user_id != to_user_id and not np.isnan(pref)])
def recommend(self, user_id, how_many=None, **params):
self.set_params(**params)
candidate_items = self.all_other_items(user_id, **params)
recommendable_items = self._top_matches(user_id, \
candidate_items, how_many)
return recommendable_items
def _top_matches(self, source_id, target_ids, how_many=None, **params):
if target_ids.size == 0:
return np.array([])
estimate_preferences = np.vectorize(self.estimate_preference)
preferences = estimate_preferences(source_id, target_ids)
preference_values = preferences[~np.isnan(preferences)]
target_ids = target_ids[~np.isnan(preferences)]
sorted_preferences = np.lexsort((preference_values,))[::-1]
sorted_preferences = sorted_preferences[0:how_many] \
if how_many and sorted_preferences.size > how_many \
else sorted_preferences
if self.with_preference:
top_n_recs = [(target_ids[ind], \
preferences[ind]) for ind in sorted_preferences]
else:
top_n_recs = [target_ids[ind]
for ind in sorted_preferences]
return top_n_recs
def recommended_because(self, user_id, item_id, how_many=None, **params):
preferences = self.model.preferences_for_item(item_id)
if self.model.has_preference_values():
similarities = \
np.array([self.similarity.get_similarity(user_id, to_user_id) \
for to_user_id, pref in preferences
if to_user_id != user_id]).flatten()
prefs = np.array([pref for it, pref in preferences])
user_ids = np.array([usr for usr, pref in preferences])
else:
similarities = \
np.array([self.similarity.get_similarity(user_id, to_user_id) \
for to_user_id in preferences
if to_user_id != user_id]).flatten()
prefs = np.array([1.0 for it in preferences])
user_ids = np.array(preferences)
scores = prefs[~np.isnan(similarities)] * \
(1.0 + similarities[~np.isnan(similarities)])
sorted_preferences = np.lexsort((scores,))[::-1]
sorted_preferences = sorted_preferences[0:how_many] \
if how_many and sorted_preferences.size > how_many \
else sorted_preferences
if self.with_preference:
top_n_recs = [(user_ids[ind], \
prefs[ind]) for ind in sorted_preferences]
else:
top_n_recs = [user_ids[ind]
for ind in sorted_preferences]
return top_n_recs
| true | true |
f72fbfc6053fee1b605915399588d9a35599ebe1 | 13,242 | py | Python | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | care/utils/tests/test_base.py | agzuniverse/care | 952babf5b394921fcdb4fd4b1405cb571261f322 | [
"MIT"
] | null | null | null | import abc
import datetime
from collections import OrderedDict
from typing import Any, Dict
import dateparser
from django.contrib.gis.geos import Point
from pytz import unicode
from rest_framework import status
from rest_framework.test import APITestCase
from care.facility.models import (
CATEGORY_CHOICES,
DISEASE_CHOICES_MAP,
SYMPTOM_CHOICES,
Disease,
DiseaseStatusEnum,
Facility,
LocalBody,
PatientConsultation,
PatientRegistration,
User,
)
from care.users.models import District, State
from config.tests.helper import EverythingEquals, mock_equal
class TestBase(APITestCase):
"""
Base class for tests, handles most of the test setup and tools for setting up data
"""
maxDiff = None
@classmethod
def create_user(cls, district: District, username: str = "user", **kwargs):
data = {
"email": f"{username}@somedomain.com",
"phone_number": "5554446667",
"age": 30,
"gender": 2,
"verified": True,
"username": username,
"password": "bar",
"district": district,
"user_type": User.TYPE_VALUE_MAP["Staff"],
}
data.update(kwargs)
return User.objects.create_user(**data)
@classmethod
def create_super_user(cls, district: District, username: str = "superuser"):
user = cls.create_user(district=district, username=username, user_type=User.TYPE_VALUE_MAP["DistrictAdmin"],)
user.is_superuser = True
user.save()
return user
@classmethod
def create_district(cls, state: State):
return District.objects.create(state=state, name=f"District{datetime.datetime.now().timestamp()}")
@classmethod
def create_state(cls):
return State.objects.create(name=f"State{datetime.datetime.now().timestamp()}")
@classmethod
def create_facility(cls, district: District, user: User = None, **kwargs):
user = user or cls.user
data = {
"name": "Foo",
"district": district,
"facility_type": 1,
"address": "8/88, 1st Cross, 1st Main, Boo Layout",
"location": Point(24.452545, 49.878248),
"oxygen_capacity": 10,
"phone_number": "9998887776",
"created_by": user,
}
data.update(kwargs)
f = Facility(**data)
f.save()
return f
@classmethod
def create_patient(cls, **kwargs):
patient_data = cls.get_patient_data().copy()
patient_data.update(kwargs)
medical_history = patient_data.pop("medical_history", [])
district_id = patient_data.pop("district", None)
state_id = patient_data.pop("state", None)
patient_data.update(
{
"district_id": district_id,
"state_id": state_id,
"disease_status": getattr(DiseaseStatusEnum, patient_data["disease_status"]).value,
}
)
patient = PatientRegistration.objects.create(**patient_data)
diseases = [
Disease.objects.create(patient=patient, disease=DISEASE_CHOICES_MAP[mh["disease"]], details=mh["details"])
for mh in medical_history
]
patient.medical_history.set(diseases)
return patient
@classmethod
def get_user_data(cls, district: District = None, user_type: str = None):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: District
user_type: str(A valid mapping for the integer types mentioned inside the models)
"""
district = district or cls.district
user_type = user_type or User.TYPE_VALUE_MAP["Staff"]
return {
"user_type": user_type,
"district": district,
"state": district.state,
"phone_number": "8887776665",
"gender": 2,
"age": 30,
"email": "foo@foobar.com",
"username": "user",
"password": "bar",
}
@classmethod
def get_facility_data(cls, district):
"""
Returns the data to be used for API testing
Returns:
dict
Params:
district: int
An id for the instance of District object created
user_type: str
A valid mapping for the integer types mentioned inside the models
"""
return {
"name": "Foo",
"district": (district or cls.district).id,
"facility_type": 1,
"address": f"Address {datetime.datetime.now().timestamp}",
"location": {"latitude": 49.878248, "longitude": 24.452545},
"oxygen_capacity": 10,
"phone_number": "9998887776",
"capacity": [],
}
@classmethod
def get_patient_data(cls, district=None, state=None):
return {
"name": "Foo",
"age": 32,
"date_of_birth": datetime.date(1992, 4, 1),
"gender": 2,
"is_medical_worker": True,
"blood_group": "O+",
"ongoing_medication": "",
"date_of_return": datetime.datetime(2020, 4, 1, 15, 30, 00),
"disease_status": "SUSPECTED",
"phone_number": "+918888888888",
"address": "Global citizen",
"contact_with_confirmed_carrier": True,
"contact_with_suspected_carrier": True,
"estimated_contact_date": None,
"past_travel": False,
"countries_travelled": "",
"present_health": "Fine",
"has_SARI": False,
"is_active": True,
"state": (state or cls.state).id,
"district": (district or cls.district).id,
"local_body": None,
"number_of_aged_dependents": 2,
"number_of_chronic_diseased_dependents": 1,
"medical_history": [{"disease": "Diabetes", "details": "150 count"}],
"date_of_receipt_of_information": datetime.datetime(2020, 4, 1, 15, 30, 00),
}
@classmethod
def setUpClass(cls) -> None:
super(TestBase, cls).setUpClass()
cls.state = cls.create_state()
cls.district = cls.create_district(cls.state)
cls.user_type = User.TYPE_VALUE_MAP["Staff"]
cls.user = cls.create_user(cls.district)
cls.super_user = cls.create_super_user(district=cls.district)
cls.facility = cls.create_facility(cls.district)
cls.patient = cls.create_patient()
cls.user_data = cls.get_user_data(cls.district, cls.user_type)
cls.facility_data = cls.get_facility_data(cls.district)
cls.patient_data = cls.get_patient_data(cls.district)
def setUp(self) -> None:
self.client.force_login(self.user)
@abc.abstractmethod
def get_base_url(self):
"""
Should return the base url of the testing viewset
WITHOUT trailing slash
eg: return "api/v1/facility"
:return: str
"""
raise NotImplementedError()
def get_url(self, entry_id=None, action=None, *args, **kwargs):
url = self.get_base_url(*args, **kwargs)
if entry_id is not None:
url = f"{url}/{entry_id}"
if action is not None:
url = f"{url}/{action}"
return f"{url}/"
@classmethod
def clone_object(cls, obj, save=True):
new_obj = obj._meta.model.objects.get(pk=obj.id)
new_obj.pk = None
new_obj.id = None
if save:
new_obj.save()
return new_obj
@abc.abstractmethod
def get_list_representation(self, obj) -> dict:
"""
Returns the dict representation of the obj in list API
:param obj: Object to be represented
:return: dict
"""
raise NotImplementedError()
@abc.abstractmethod
def get_detail_representation(self, obj=None) -> dict:
"""
Returns the dict representation of the obj in detail/retrieve API
:param obj: Object to be represented
:param data: data
:return: dict
"""
raise NotImplementedError()
def get_local_body_district_state_representation(self, obj):
"""
Returns the local body, district and state representation for the obj.
The obj is expected to have `local_body`, `district` and `state` in it's attributes
Eg: Facility, Patient, User
:param obj: Any object which has `local_body`, `district` and `state` in attrs
:return:
"""
response = {}
response.update(self.get_local_body_representation(getattr(obj, "local_body", None)))
response.update(self.get_district_representation(getattr(obj, "district", None)))
response.update(self.get_state_representation(getattr(obj, "state", None)))
return response
def get_local_body_representation(self, local_body: LocalBody):
if local_body is None:
return {"local_body": None, "local_body_object": None}
else:
return {
"local_body": local_body.id,
"local_body_object": {
"id": local_body.id,
"name": local_body.name,
"district": local_body.district.id,
},
}
def get_district_representation(self, district: District):
if district is None:
return {"district": None, "district_object": None}
return {
"district": district.id,
"district_object": {"id": district.id, "name": district.name, "state": district.state.id,},
}
def get_state_representation(self, state: State):
if state is None:
return {"state": None, "state_object": None}
return {"state": state.id, "state_object": {"id": state.id, "name": state.name}}
def assertDictEqual(self, first: Dict[Any, Any], second: Dict[Any, Any], msg: Any = ...) -> None:
first_dict = self._convert_to_matchable_types(first.copy())
second_dict = self._convert_to_matchable_types(second.copy())
return super(TestBase, self).assertDictEqual(first_dict, second_dict, msg)
def _convert_to_matchable_types(self, d):
def dict_to_matching_type(d: dict):
return {k: to_matching_type(k, v) for k, v in d.items()}
def to_matching_type(name: str, value):
if isinstance(value, (OrderedDict, dict)):
return dict_to_matching_type(dict(value))
elif isinstance(value, list):
return [to_matching_type("", v) for v in value]
elif "date" in name and not isinstance(value, (type(None), EverythingEquals)):
return_value = value
if isinstance(value, (str, unicode,)):
return_value = dateparser.parse(value)
return (
return_value.astimezone(tz=datetime.timezone.utc)
if isinstance(return_value, datetime.datetime)
else return_value
)
return value
return dict_to_matching_type(d)
def execute_list(self, user=None):
user = user or self.user
self.client.force_authenticate(user)
response = self.client.get(self.get_url(), format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
return response
def get_facility_representation(self, facility):
if facility is None:
return facility
else:
return {
"id": facility.id,
"name": facility.name,
"facility_type": {"id": facility.facility_type, "name": facility.get_facility_type_display()},
**self.get_local_body_district_state_representation(facility),
}
@classmethod
def get_consultation_data(cls):
return {
"patient": cls.patient,
"facility": cls.facility,
"symptoms": [SYMPTOM_CHOICES[0][0], SYMPTOM_CHOICES[1][0]],
"other_symptoms": "No other symptoms",
"symptoms_onset_date": datetime.datetime(2020, 4, 7, 15, 30),
"category": CATEGORY_CHOICES[0][0],
"examination_details": "examination_details",
"existing_medication": "existing_medication",
"prescribed_medication": "prescribed_medication",
"suggestion": PatientConsultation.SUGGESTION_CHOICES[0][0],
"referred_to": None,
"admitted": False,
"admitted_to": None,
"admission_date": None,
"discharge_date": None,
"created_date": mock_equal,
"modified_date": mock_equal,
}
@classmethod
def create_consultation(cls, patient=None, facility=None, **kwargs) -> PatientConsultation:
data = cls.get_consultation_data()
kwargs.update({"patient": patient or cls.patient, "facility": facility or cls.facility})
data.update(kwargs)
return PatientConsultation.objects.create(**data)
| 35.596774 | 118 | 0.589337 | import abc
import datetime
from collections import OrderedDict
from typing import Any, Dict
import dateparser
from django.contrib.gis.geos import Point
from pytz import unicode
from rest_framework import status
from rest_framework.test import APITestCase
from care.facility.models import (
CATEGORY_CHOICES,
DISEASE_CHOICES_MAP,
SYMPTOM_CHOICES,
Disease,
DiseaseStatusEnum,
Facility,
LocalBody,
PatientConsultation,
PatientRegistration,
User,
)
from care.users.models import District, State
from config.tests.helper import EverythingEquals, mock_equal
class TestBase(APITestCase):
maxDiff = None
@classmethod
def create_user(cls, district: District, username: str = "user", **kwargs):
data = {
"email": f"{username}@somedomain.com",
"phone_number": "5554446667",
"age": 30,
"gender": 2,
"verified": True,
"username": username,
"password": "bar",
"district": district,
"user_type": User.TYPE_VALUE_MAP["Staff"],
}
data.update(kwargs)
return User.objects.create_user(**data)
@classmethod
def create_super_user(cls, district: District, username: str = "superuser"):
user = cls.create_user(district=district, username=username, user_type=User.TYPE_VALUE_MAP["DistrictAdmin"],)
user.is_superuser = True
user.save()
return user
@classmethod
def create_district(cls, state: State):
return District.objects.create(state=state, name=f"District{datetime.datetime.now().timestamp()}")
@classmethod
def create_state(cls):
return State.objects.create(name=f"State{datetime.datetime.now().timestamp()}")
@classmethod
def create_facility(cls, district: District, user: User = None, **kwargs):
user = user or cls.user
data = {
"name": "Foo",
"district": district,
"facility_type": 1,
"address": "8/88, 1st Cross, 1st Main, Boo Layout",
"location": Point(24.452545, 49.878248),
"oxygen_capacity": 10,
"phone_number": "9998887776",
"created_by": user,
}
data.update(kwargs)
f = Facility(**data)
f.save()
return f
@classmethod
def create_patient(cls, **kwargs):
patient_data = cls.get_patient_data().copy()
patient_data.update(kwargs)
medical_history = patient_data.pop("medical_history", [])
district_id = patient_data.pop("district", None)
state_id = patient_data.pop("state", None)
patient_data.update(
{
"district_id": district_id,
"state_id": state_id,
"disease_status": getattr(DiseaseStatusEnum, patient_data["disease_status"]).value,
}
)
patient = PatientRegistration.objects.create(**patient_data)
diseases = [
Disease.objects.create(patient=patient, disease=DISEASE_CHOICES_MAP[mh["disease"]], details=mh["details"])
for mh in medical_history
]
patient.medical_history.set(diseases)
return patient
@classmethod
def get_user_data(cls, district: District = None, user_type: str = None):
district = district or cls.district
user_type = user_type or User.TYPE_VALUE_MAP["Staff"]
return {
"user_type": user_type,
"district": district,
"state": district.state,
"phone_number": "8887776665",
"gender": 2,
"age": 30,
"email": "foo@foobar.com",
"username": "user",
"password": "bar",
}
@classmethod
def get_facility_data(cls, district):
return {
"name": "Foo",
"district": (district or cls.district).id,
"facility_type": 1,
"address": f"Address {datetime.datetime.now().timestamp}",
"location": {"latitude": 49.878248, "longitude": 24.452545},
"oxygen_capacity": 10,
"phone_number": "9998887776",
"capacity": [],
}
@classmethod
def get_patient_data(cls, district=None, state=None):
return {
"name": "Foo",
"age": 32,
"date_of_birth": datetime.date(1992, 4, 1),
"gender": 2,
"is_medical_worker": True,
"blood_group": "O+",
"ongoing_medication": "",
"date_of_return": datetime.datetime(2020, 4, 1, 15, 30, 00),
"disease_status": "SUSPECTED",
"phone_number": "+918888888888",
"address": "Global citizen",
"contact_with_confirmed_carrier": True,
"contact_with_suspected_carrier": True,
"estimated_contact_date": None,
"past_travel": False,
"countries_travelled": "",
"present_health": "Fine",
"has_SARI": False,
"is_active": True,
"state": (state or cls.state).id,
"district": (district or cls.district).id,
"local_body": None,
"number_of_aged_dependents": 2,
"number_of_chronic_diseased_dependents": 1,
"medical_history": [{"disease": "Diabetes", "details": "150 count"}],
"date_of_receipt_of_information": datetime.datetime(2020, 4, 1, 15, 30, 00),
}
@classmethod
def setUpClass(cls) -> None:
super(TestBase, cls).setUpClass()
cls.state = cls.create_state()
cls.district = cls.create_district(cls.state)
cls.user_type = User.TYPE_VALUE_MAP["Staff"]
cls.user = cls.create_user(cls.district)
cls.super_user = cls.create_super_user(district=cls.district)
cls.facility = cls.create_facility(cls.district)
cls.patient = cls.create_patient()
cls.user_data = cls.get_user_data(cls.district, cls.user_type)
cls.facility_data = cls.get_facility_data(cls.district)
cls.patient_data = cls.get_patient_data(cls.district)
def setUp(self) -> None:
self.client.force_login(self.user)
@abc.abstractmethod
def get_base_url(self):
raise NotImplementedError()
def get_url(self, entry_id=None, action=None, *args, **kwargs):
url = self.get_base_url(*args, **kwargs)
if entry_id is not None:
url = f"{url}/{entry_id}"
if action is not None:
url = f"{url}/{action}"
return f"{url}/"
@classmethod
def clone_object(cls, obj, save=True):
new_obj = obj._meta.model.objects.get(pk=obj.id)
new_obj.pk = None
new_obj.id = None
if save:
new_obj.save()
return new_obj
@abc.abstractmethod
def get_list_representation(self, obj) -> dict:
raise NotImplementedError()
@abc.abstractmethod
def get_detail_representation(self, obj=None) -> dict:
raise NotImplementedError()
def get_local_body_district_state_representation(self, obj):
response = {}
response.update(self.get_local_body_representation(getattr(obj, "local_body", None)))
response.update(self.get_district_representation(getattr(obj, "district", None)))
response.update(self.get_state_representation(getattr(obj, "state", None)))
return response
def get_local_body_representation(self, local_body: LocalBody):
if local_body is None:
return {"local_body": None, "local_body_object": None}
else:
return {
"local_body": local_body.id,
"local_body_object": {
"id": local_body.id,
"name": local_body.name,
"district": local_body.district.id,
},
}
def get_district_representation(self, district: District):
if district is None:
return {"district": None, "district_object": None}
return {
"district": district.id,
"district_object": {"id": district.id, "name": district.name, "state": district.state.id,},
}
def get_state_representation(self, state: State):
if state is None:
return {"state": None, "state_object": None}
return {"state": state.id, "state_object": {"id": state.id, "name": state.name}}
def assertDictEqual(self, first: Dict[Any, Any], second: Dict[Any, Any], msg: Any = ...) -> None:
first_dict = self._convert_to_matchable_types(first.copy())
second_dict = self._convert_to_matchable_types(second.copy())
return super(TestBase, self).assertDictEqual(first_dict, second_dict, msg)
def _convert_to_matchable_types(self, d):
def dict_to_matching_type(d: dict):
return {k: to_matching_type(k, v) for k, v in d.items()}
def to_matching_type(name: str, value):
if isinstance(value, (OrderedDict, dict)):
return dict_to_matching_type(dict(value))
elif isinstance(value, list):
return [to_matching_type("", v) for v in value]
elif "date" in name and not isinstance(value, (type(None), EverythingEquals)):
return_value = value
if isinstance(value, (str, unicode,)):
return_value = dateparser.parse(value)
return (
return_value.astimezone(tz=datetime.timezone.utc)
if isinstance(return_value, datetime.datetime)
else return_value
)
return value
return dict_to_matching_type(d)
def execute_list(self, user=None):
user = user or self.user
self.client.force_authenticate(user)
response = self.client.get(self.get_url(), format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
return response
def get_facility_representation(self, facility):
if facility is None:
return facility
else:
return {
"id": facility.id,
"name": facility.name,
"facility_type": {"id": facility.facility_type, "name": facility.get_facility_type_display()},
**self.get_local_body_district_state_representation(facility),
}
@classmethod
def get_consultation_data(cls):
return {
"patient": cls.patient,
"facility": cls.facility,
"symptoms": [SYMPTOM_CHOICES[0][0], SYMPTOM_CHOICES[1][0]],
"other_symptoms": "No other symptoms",
"symptoms_onset_date": datetime.datetime(2020, 4, 7, 15, 30),
"category": CATEGORY_CHOICES[0][0],
"examination_details": "examination_details",
"existing_medication": "existing_medication",
"prescribed_medication": "prescribed_medication",
"suggestion": PatientConsultation.SUGGESTION_CHOICES[0][0],
"referred_to": None,
"admitted": False,
"admitted_to": None,
"admission_date": None,
"discharge_date": None,
"created_date": mock_equal,
"modified_date": mock_equal,
}
@classmethod
def create_consultation(cls, patient=None, facility=None, **kwargs) -> PatientConsultation:
data = cls.get_consultation_data()
kwargs.update({"patient": patient or cls.patient, "facility": facility or cls.facility})
data.update(kwargs)
return PatientConsultation.objects.create(**data)
| true | true |
f72fc0334115b183ce538c3c6dd415915cddc916 | 1,223 | py | Python | data_statistics.py | Dipeshtamboli/domain-shift | 3f29577df6ab7269ad69a5fc651b63ed78708f0b | [
"MIT"
] | null | null | null | data_statistics.py | Dipeshtamboli/domain-shift | 3f29577df6ab7269ad69a5fc651b63ed78708f0b | [
"MIT"
] | null | null | null | data_statistics.py | Dipeshtamboli/domain-shift | 3f29577df6ab7269ad69a5fc651b63ed78708f0b | [
"MIT"
] | null | null | null | import pdb
import numpy as np
import os
import glob
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image
from tqdm import tqdm
relative_path = 'datasets/resnet_features_subset_office31/'
# relative_path = 'datasets/office-31_10_class_subset/'
all_npys = glob.glob(os.path.dirname(os.path.realpath(__file__))+'/'+relative_path+"**/*.npy" , recursive=True)
num_plot_classes = 31
all_features = np.zeros((num_plot_classes*3*5,1000))
all_feat = {
"amazon": np.zeros((num_plot_classes*5,1000)),
"dslr": np.zeros((num_plot_classes*5,1000)),
"webcam": np.zeros((num_plot_classes*5,1000)),
}
domain_names =[]
class_names = []
counter = 0
for i, npy_loc in enumerate(all_npys):
unique_labels, unique_counts = np.unique(class_names, return_counts=True)
domain = npy_loc.split('/')[-3]
class_name = npy_loc.split('/')[-2]
if len(np.unique(class_names)) < num_plot_classes or class_name in class_names:
all_features[counter] = np.load(npy_loc)
counter += 1
domain_names.append(domain)
class_names.append(class_name) | 33.054054 | 112 | 0.713001 | import pdb
import numpy as np
import os
import glob
import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image
from tqdm import tqdm
relative_path = 'datasets/resnet_features_subset_office31/'
all_npys = glob.glob(os.path.dirname(os.path.realpath(__file__))+'/'+relative_path+"**/*.npy" , recursive=True)
num_plot_classes = 31
all_features = np.zeros((num_plot_classes*3*5,1000))
all_feat = {
"amazon": np.zeros((num_plot_classes*5,1000)),
"dslr": np.zeros((num_plot_classes*5,1000)),
"webcam": np.zeros((num_plot_classes*5,1000)),
}
domain_names =[]
class_names = []
counter = 0
for i, npy_loc in enumerate(all_npys):
unique_labels, unique_counts = np.unique(class_names, return_counts=True)
domain = npy_loc.split('/')[-3]
class_name = npy_loc.split('/')[-2]
if len(np.unique(class_names)) < num_plot_classes or class_name in class_names:
all_features[counter] = np.load(npy_loc)
counter += 1
domain_names.append(domain)
class_names.append(class_name) | true | true |
f72fc06d644f387753e387544faebf08963a1082 | 16,871 | py | Python | tensorflow_probability/python/distributions/poisson_lognormal.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/poisson_lognormal.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/poisson_lognormal.py | hephaex/probability | 740d0db0bf2b1e1a04cfd0b55481c44380b3cb05 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The PoissonLogNormalQuadratureCompound distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import poisson
from tensorflow_probability.python.distributions import seed_stream
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
def quadrature_scheme_lognormal_gauss_hermite(
    loc, scale, quadrature_size,
    validate_args=False, name=None):  # pylint: disable=unused-argument
  """Use Gauss-Hermite quadrature to form quadrature on positive-reals.

  Note: for a given `quadrature_size`, this method is generally less accurate
  than `quadrature_scheme_lognormal_quantiles`.

  Args:
    loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
      the LogNormal prior.
    scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
      the LogNormal prior.
    quadrature_size: Python `int` scalar representing the number of quadrature
      points.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    grid: (Batch of) length-`quadrature_size` vectors representing the
      `log_rate` parameters of a `Poisson`.
    probs: (Batch of) length-`quadrature_size` vectors representing the
      weight associated with each `grid` value.
  """
  with tf.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
                     [loc, scale]):
    np_dtype = loc.dtype.as_numpy_dtype
    # Abscissae and weights for integrating against exp(-x**2).
    abscissae, weights = np.polynomial.hermite.hermgauss(deg=quadrature_size)
    abscissae = abscissae.astype(np_dtype)
    weights = weights.astype(np_dtype)
    # L1-normalize the weights so they form a probability vector.
    weights /= np.linalg.norm(weights, ord=1, keepdims=True)
    probs = tf.convert_to_tensor(value=weights, name="probs", dtype=loc.dtype)
    # Change of variables x -> loc + sqrt(2) * scale * x maps the standard
    # Gauss-Hermite abscissae onto log-rates for every (possibly batched)
    # `loc`/`scale` pair; the trailing new axis broadcasts against the grid.
    grid = (loc[..., tf.newaxis]
            + np.sqrt(2.) * scale[..., tf.newaxis] * abscissae)
    return grid, probs
def quadrature_scheme_lognormal_quantiles(
    loc, scale, quadrature_size,
    validate_args=False, name=None):
  """Use LogNormal quantiles to form quadrature on positive-reals.

  Args:
    loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
      the LogNormal prior.
    scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
      the LogNormal prior.
    quadrature_size: Python `int` scalar representing the number of quadrature
      points.
    validate_args: Python `bool`, default `False`. When `True` distribution
      parameters are checked for validity despite possibly degrading runtime
      performance. When `False` invalid inputs may silently render incorrect
      outputs.
    name: Python `str` name prefixed to Ops created by this class.

  Returns:
    grid: (Batch of) length-`quadrature_size` vectors representing the
      `log_rate` parameters of a `Poisson`.
    probs: (Batch of) length-`quadrature_size` vectors representing the
      weight associate with each `grid` value.
  """
  with tf.name_scope(name, "quadrature_scheme_lognormal_quantiles",
                     [loc, scale]):
    # Create a LogNormal distribution (Normal pushed through Exp).
    dist = transformed_distribution.TransformedDistribution(
        distribution=normal.Normal(loc=loc, scale=scale),
        bijector=exp_bijector.Exp(),
        validate_args=validate_args)
    batch_ndims = dist.batch_shape.ndims
    if batch_ndims is None:
      # Batch rank unknown statically; fall back to the dynamic rank.
      batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]

    def _compute_quantiles():
      """Helper to build quantiles."""
      # Omit {0, 1} since they might lead to Inf/NaN.
      # `quadrature_size + 3` points on [0, 1] minus the two endpoints leaves
      # `quadrature_size + 1` probability edges, i.e. exactly one more edge
      # than quadrature points, so the midpoints below number
      # `quadrature_size`.
      zero = tf.zeros([], dtype=dist.dtype)
      edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
      # Expand edges so it broadcasts across batch dims.
      edges = tf.reshape(
          edges,
          shape=tf.concat(
              [[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
      quantiles = dist.quantile(edges)
      # Cyclically permute left by one; this moves the edges axis from the
      # front to the rightmost position: [batch..., quadrature_size + 1].
      perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
      quantiles = tf.transpose(a=quantiles, perm=perm)
      return quantiles
    quantiles = _compute_quantiles()

    # Compute grid as quantile midpoints.
    grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
    # Set shape hints.
    grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))

    # By construction probs is constant, i.e., `1 / quadrature_size`. This is
    # important, because non-constant probs leads to non-reparameterizable
    # samples.
    probs = tf.fill(
        dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))

    return grid, probs
class PoissonLogNormalQuadratureCompound(distribution.Distribution):
  """`PoissonLogNormalQuadratureCompound` distribution.

  The `PoissonLogNormalQuadratureCompound` is an approximation to a
  Poisson-LogNormal [compound distribution](
  https://en.wikipedia.org/wiki/Compound_probability_distribution), i.e.,

  ```none
  p(k|loc, scale)
  = int_{R_+} dl LogNormal(l | loc, scale) Poisson(k | l)
  approx= sum{ prob[d] Poisson(k | lambda(grid[d])) : d=0, ..., deg-1 }
  ```

  By default, the `grid` is chosen as quantiles of the `LogNormal` distribution
  parameterized by `loc`, `scale` and the `prob` vector is
  `[1. / quadrature_size]*quadrature_size`.

  In the non-approximation case, a draw from the LogNormal prior represents the
  Poisson rate parameter. Unfortunately, the non-approximate distribution lacks
  an analytical probability density function (pdf). Therefore the
  `PoissonLogNormalQuadratureCompound` class implements an approximation based
  on [quadrature](https://en.wikipedia.org/wiki/Numerical_integration).

  Note: although the `PoissonLogNormalQuadratureCompound` is approximately the
  Poisson-LogNormal compound distribution, it is itself a valid distribution.
  Viz., it possesses a `sample`, `log_prob`, `mean`, `variance`, etc. which are
  all mutually consistent.

  #### Mathematical Details

  The `PoissonLogNormalQuadratureCompound` approximates a Poisson-LogNormal
  [compound distribution](
  https://en.wikipedia.org/wiki/Compound_probability_distribution). Using
  variable-substitution and [numerical quadrature](
  https://en.wikipedia.org/wiki/Numerical_integration) (default:
  based on `LogNormal` quantiles) we can redefine the distribution to be a
  parameter-less convex combination of `deg` different Poisson samples.

  That is, defined over positive integers, this distribution is parameterized
  by a (batch of) `loc` and `scale` scalars.

  The probability density function (pdf) is,

  ```none
  pdf(k | loc, scale, deg)
  = sum{ prob[d] Poisson(k | lambda=exp(grid[d]))
        : d=0, ..., deg-1 }
  ```

  #### Examples

  ```python
  tfd = tfp.distributions

  # Create two batches of PoissonLogNormalQuadratureCompounds, one with
  # prior `loc = 0.` and another with `loc = 1.` In both cases `scale = 1.`
  pln = tfd.PoissonLogNormalQuadratureCompound(
      loc=[0., -0.5],
      scale=1.,
      quadrature_size=10,
      validate_args=True)
  ```
  """
  def __init__(self,
               loc,
               scale,
               quadrature_size=8,
               quadrature_fn=quadrature_scheme_lognormal_quantiles,
               validate_args=False,
               allow_nan_stats=True,
               name="PoissonLogNormalQuadratureCompound"):
    """Constructs the `PoissonLogNormalQuadratureCompound`.

    Note: `probs` returned by (optional) `quadrature_fn` are presumed to be
    either a length-`quadrature_size` vector or a batch of vectors in 1-to-1
    correspondence with the returned `grid`. (I.e., broadcasting is only
    partially supported.)

    Args:
      loc: `float`-like (batch of) scalar `Tensor`; the location parameter of
        the LogNormal prior.
      scale: `float`-like (batch of) scalar `Tensor`; the scale parameter of
        the LogNormal prior.
      quadrature_size: Python `int` scalar representing the number of quadrature
        points.
      quadrature_fn: Python callable taking `loc`, `scale`,
        `quadrature_size`, `validate_args` and returning `tuple(grid, probs)`
        representing the LogNormal grid and corresponding normalized weight.
        Default value: `quadrature_scheme_lognormal_quantiles`.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `quadrature_grid` and `quadrature_probs` have different base
        `dtype`.
    """
    parameters = dict(locals())
    with tf.name_scope(name, values=[loc, scale]) as name:
      # Resolve one common floating dtype for `loc`/`scale` (float32 when
      # neither carries a dtype). Either argument may be `None`.
      dtype = dtype_util.common_dtype([loc, scale], tf.float32)
      if loc is not None:
        loc = tf.convert_to_tensor(value=loc, name="loc", dtype=dtype)
      if scale is not None:
        scale = tf.convert_to_tensor(value=scale, dtype=dtype, name="scale")
      self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
          loc, scale, quadrature_size, validate_args))

      dt = self._quadrature_grid.dtype
      if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
        raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
                        "probs dtype ({}).".format(
                            dt.name, self._quadrature_probs.dtype.name))

      # One Poisson per quadrature point; the grid supplies the log-rates.
      self._distribution = poisson.Poisson(
          log_rate=self._quadrature_grid,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats)

      # Categorical over quadrature points, weighted by the quadrature probs.
      self._mixture_distribution = categorical.Categorical(
          logits=tf.math.log(self._quadrature_probs),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats)

      self._loc = loc
      self._scale = scale
      self._quadrature_size = quadrature_size

      super(PoissonLogNormalQuadratureCompound, self).__init__(
          dtype=dt,
          reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          parameters=parameters,
          graph_parents=[loc, scale],
          name=name)
  @property
  def mixture_distribution(self):
    """Categorical distribution selecting which quadrature Poisson is used."""
    return self._mixture_distribution
  @property
  def distribution(self):
    """Base Poisson parameterized by a quadrature grid of log-rates."""
    return self._distribution
  @property
  def loc(self):
    """Location parameter of the LogNormal prior."""
    return self._loc
  @property
  def scale(self):
    """Scale parameter of the LogNormal prior."""
    return self._scale
  @property
  def quadrature_size(self):
    """Number of quadrature points used to approximate the LogNormal prior."""
    return self._quadrature_size
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
tf.shape(input=self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return tf.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
  def _event_shape(self):
    """Events are scalar counts, so the event shape is empty."""
    return tf.TensorShape([])
  def _sample_n(self, n, seed=None):
    """Draws `n` samples: pick a quadrature component, then sample Poisson.

    Component ids sampled from the mixture are mapped (via flattening plus a
    strided offset) into the flattened grid of Poisson rates; the gathered
    rates are then fed to `tf.random.poisson`.
    """
    # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
    # ids as a [n]-shaped vector.
    batch_size = self.batch_shape.num_elements()
    if batch_size is None:
      batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())
    # We need to "sample extra" from the mixture distribution if it doesn't
    # already specify a probs vector for each batch coordinate.
    # We only support this kind of reduced broadcasting, i.e., there is exactly
    # one probs vector for all batch dims or one for each.
    stream = seed_stream.SeedStream(
        seed, salt="PoissonLogNormalQuadratureCompound")
    ids = self._mixture_distribution.sample(
        sample_shape=concat_vectors(
            [n],
            distribution_util.pick_vector(
                self.mixture_distribution.is_scalar_batch(),
                [batch_size],
                np.int32([]))),
        seed=stream())
    # We need to flatten batch dims in case mixture_distribution has its own
    # batch dims.
    ids = tf.reshape(
        ids,
        shape=concat_vectors([n],
                             distribution_util.pick_vector(
                                 self.is_scalar_batch(), np.int32([]),
                                 np.int32([-1]))))
    # Stride `quadrature_size` for `batch_size` number of times so each batch
    # member indexes into its own slice of the flattened rate grid.
    offset = tf.range(
        start=0,
        limit=batch_size * self._quadrature_size,
        delta=self._quadrature_size,
        dtype=ids.dtype)
    ids += offset
    # Gather each sample's Poisson rate, then restore the batch shape.
    rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)
    rate = tf.reshape(
        rate, shape=concat_vectors([n], self.batch_shape_tensor()))
    return tf.random.poisson(lam=rate, shape=[], dtype=self.dtype, seed=seed)
  def _log_prob(self, x):
    """Mixture log-prob: log-sum-exp over quadrature components at `x`."""
    return tf.reduce_logsumexp(
        input_tensor=(self.mixture_distribution.logits +
                      self.distribution.log_prob(x[..., tf.newaxis])),
        axis=-1)
  def _mean(self):
    """Mean: probability-weighted sum of per-component Poisson rates.

    Computed in log-space for numerical stability:
    E[X] = exp(logsumexp(logits + log_rate)).
    """
    return tf.exp(
        tf.reduce_logsumexp(
            input_tensor=self.mixture_distribution.logits +
            self.distribution.log_rate,
            axis=-1))
  def _variance(self):
    """Variance, computed in log-space; see `_log_variance`."""
    return tf.exp(self._log_variance())
  def _stddev(self):
    """Standard deviation: exp of half the log-variance."""
    return tf.exp(0.5 * self._log_variance())
  def _log_variance(self):
    """Log of the variance via the law of total variance, in log-space."""
    # Following calculation is based on law of total variance:
    #
    # Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
    #
    # where,
    #
    # Z|v ~ interpolate_affine[v](distribution)
    # V ~ mixture_distribution
    #
    # thus,
    #
    # E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
    # Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
    v = tf.stack(
        [
            # log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
            self.distribution.log_rate,
            # log((Mean[d] - Mean)**2)
            2. * tf.math.log(
                tf.abs(self.distribution.mean() -
                       self._mean()[..., tf.newaxis])),
        ],
        axis=-1)
    # Weight both terms by the mixture probabilities and sum, all in log-space.
    return tf.reduce_logsumexp(
        input_tensor=self.mixture_distribution.logits[..., tf.newaxis] + v,
        axis=[-2, -1])
def concat_vectors(*args):
  """Concatenate the given vectors, producing a static result when possible."""
  static_vals = [tf.get_static_value(x) for x in args]
  if all(vec is not None for vec in static_vals):
    # Every input is statically known: flatten into a plain Python list.
    flattened = []
    for vec in static_vals:
      flattened.extend(vec)
    return flattened
  # At least one input is only known at graph-run time; defer to tf.concat.
  return tf.concat(args, axis=0)
| 39.510539 | 80 | 0.684073 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.bijectors import exp as exp_bijector
from tensorflow_probability.python.distributions import categorical
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import normal
from tensorflow_probability.python.distributions import poisson
from tensorflow_probability.python.distributions import seed_stream
from tensorflow_probability.python.distributions import transformed_distribution
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
__all__ = [
"PoissonLogNormalQuadratureCompound",
"quadrature_scheme_lognormal_gauss_hermite",
"quadrature_scheme_lognormal_quantiles",
]
def quadrature_scheme_lognormal_gauss_hermite(
loc, scale, quadrature_size,
validate_args=False, name=None):
with tf.name_scope(name, "vector_diffeomixture_quadrature_gauss_hermite",
[loc, scale]):
grid, probs = np.polynomial.hermite.hermgauss(deg=quadrature_size)
grid = grid.astype(loc.dtype.as_numpy_dtype)
probs = probs.astype(loc.dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
probs = tf.convert_to_tensor(value=probs, name="probs", dtype=loc.dtype)
grid = (loc[..., tf.newaxis] + np.sqrt(2.) * scale[..., tf.newaxis] * grid)
return grid, probs
def quadrature_scheme_lognormal_quantiles(
loc, scale, quadrature_size,
validate_args=False, name=None):
with tf.name_scope(name, "quadrature_scheme_lognormal_quantiles",
[loc, scale]):
dist = transformed_distribution.TransformedDistribution(
distribution=normal.Normal(loc=loc, scale=scale),
bijector=exp_bijector.Exp(),
validate_args=validate_args)
batch_ndims = dist.batch_shape.ndims
if batch_ndims is None:
batch_ndims = tf.shape(input=dist.batch_shape_tensor())[0]
def _compute_quantiles():
zero = tf.zeros([], dtype=dist.dtype)
edges = tf.linspace(zero, 1., quadrature_size + 3)[1:-1]
edges = tf.reshape(
edges,
shape=tf.concat(
[[-1], tf.ones([batch_ndims], dtype=tf.int32)], axis=0))
quantiles = dist.quantile(edges)
perm = tf.concat([tf.range(1, 1 + batch_ndims), [0]], axis=0)
quantiles = tf.transpose(a=quantiles, perm=perm)
return quantiles
quantiles = _compute_quantiles()
grid = (quantiles[..., :-1] + quantiles[..., 1:]) / 2.
grid.set_shape(dist.batch_shape.concatenate([quadrature_size]))
probs = tf.fill(
dims=[quadrature_size], value=1. / tf.cast(quadrature_size, dist.dtype))
return grid, probs
class PoissonLogNormalQuadratureCompound(distribution.Distribution):
def __init__(self,
loc,
scale,
quadrature_size=8,
quadrature_fn=quadrature_scheme_lognormal_quantiles,
validate_args=False,
allow_nan_stats=True,
name="PoissonLogNormalQuadratureCompound"):
parameters = dict(locals())
with tf.name_scope(name, values=[loc, scale]) as name:
dtype = dtype_util.common_dtype([loc, scale], tf.float32)
if loc is not None:
loc = tf.convert_to_tensor(value=loc, name="loc", dtype=dtype)
if scale is not None:
scale = tf.convert_to_tensor(value=scale, dtype=dtype, name="scale")
self._quadrature_grid, self._quadrature_probs = tuple(quadrature_fn(
loc, scale, quadrature_size, validate_args))
dt = self._quadrature_grid.dtype
if dt.base_dtype != self._quadrature_probs.dtype.base_dtype:
raise TypeError("Quadrature grid dtype ({}) does not match quadrature "
"probs dtype ({}).".format(
dt.name, self._quadrature_probs.dtype.name))
self._distribution = poisson.Poisson(
log_rate=self._quadrature_grid,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._mixture_distribution = categorical.Categorical(
logits=tf.math.log(self._quadrature_probs),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats)
self._loc = loc
self._scale = scale
self._quadrature_size = quadrature_size
super(PoissonLogNormalQuadratureCompound, self).__init__(
dtype=dt,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[loc, scale],
name=name)
@property
def mixture_distribution(self):
return self._mixture_distribution
@property
def distribution(self):
return self._distribution
@property
def loc(self):
return self._loc
@property
def scale(self):
return self._scale
@property
def quadrature_size(self):
return self._quadrature_size
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
self.distribution.batch_shape_tensor(),
tf.shape(input=self.mixture_distribution.logits))[:-1]
def _batch_shape(self):
return tf.broadcast_static_shape(
self.distribution.batch_shape,
self.mixture_distribution.logits.shape)[:-1]
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
batch_size = self.batch_shape.num_elements()
if batch_size is None:
batch_size = tf.reduce_prod(input_tensor=self.batch_shape_tensor())
# already specify a probs vector for each batch coordinate.
# We only support this kind of reduced broadcasting, i.e., there is exactly
# one probs vector for all batch dims or one for each.
stream = seed_stream.SeedStream(
seed, salt="PoissonLogNormalQuadratureCompound")
ids = self._mixture_distribution.sample(
sample_shape=concat_vectors(
[n],
distribution_util.pick_vector(
self.mixture_distribution.is_scalar_batch(),
[batch_size],
np.int32([]))),
seed=stream())
# We need to flatten batch dims in case mixture_distribution has its own
# batch dims.
ids = tf.reshape(
ids,
shape=concat_vectors([n],
distribution_util.pick_vector(
self.is_scalar_batch(), np.int32([]),
np.int32([-1]))))
# Stride `quadrature_size` for `batch_size` number of times.
offset = tf.range(
start=0,
limit=batch_size * self._quadrature_size,
delta=self._quadrature_size,
dtype=ids.dtype)
ids += offset
rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)
rate = tf.reshape(
rate, shape=concat_vectors([n], self.batch_shape_tensor()))
return tf.random.poisson(lam=rate, shape=[], dtype=self.dtype, seed=seed)
def _log_prob(self, x):
return tf.reduce_logsumexp(
input_tensor=(self.mixture_distribution.logits +
self.distribution.log_prob(x[..., tf.newaxis])),
axis=-1)
def _mean(self):
return tf.exp(
tf.reduce_logsumexp(
input_tensor=self.mixture_distribution.logits +
self.distribution.log_rate,
axis=-1))
def _variance(self):
return tf.exp(self._log_variance())
def _stddev(self):
return tf.exp(0.5 * self._log_variance())
def _log_variance(self):
# Following calculation is based on law of total variance:
#
# Var[Z] = E[Var[Z | V]] + Var[E[Z | V]]
#
# where,
#
# Z|v ~ interpolate_affine[v](distribution)
# V ~ mixture_distribution
#
# thus,
#
# E[Var[Z | V]] = sum{ prob[d] Var[d] : d=0, ..., deg-1 }
# Var[E[Z | V]] = sum{ prob[d] (Mean[d] - Mean)**2 : d=0, ..., deg-1 }
v = tf.stack(
[
# log(self.distribution.variance()) = log(Var[d]) = log(rate[d])
self.distribution.log_rate,
# log((Mean[d] - Mean)**2)
2. * tf.math.log(
tf.abs(self.distribution.mean() -
self._mean()[..., tf.newaxis])),
],
axis=-1)
return tf.reduce_logsumexp(
input_tensor=self.mixture_distribution.logits[..., tf.newaxis] + v,
axis=[-2, -1])
def concat_vectors(*args):
args_ = [tf.get_static_value(x) for x in args]
if any(vec is None for vec in args_):
return tf.concat(args, axis=0)
return [val for vec in args_ for val in vec]
| true | true |
f72fc09102b906fd3f59703976525e7e5cd9e483 | 2,338 | py | Python | tests/test_paramark.py | mrzechonek/pytest-paramark | 2c899e200eb0d68e66cd4e32e46c9cdd396845ec | [
"MIT"
] | 1 | 2021-12-23T11:21:16.000Z | 2021-12-23T11:21:16.000Z | tests/test_paramark.py | mrzechonek/pytest-paramark | 2c899e200eb0d68e66cd4e32e46c9cdd396845ec | [
"MIT"
] | null | null | null | tests/test_paramark.py | mrzechonek/pytest-paramark | 2c899e200eb0d68e66cd4e32e46c9cdd396845ec | [
"MIT"
] | null | null | null | from namedlist import namedlist
import pytest
# fmt: off
@pytest.fixture(indirect=True)
def foo(request):
    """Fixture yielding a Foo record; field values come from indirect params."""
    Foo = namedlist('Foo', (
        ('some_option', 42),
        ('another_option', 'test'),
    ))
    return Foo(**request.param)
@pytest.fixture(indirect=True)
def bar(request):
    """Fixture yielding a Bar record; field values come from indirect params."""
    Bar = namedlist('Bar', (
        ('some_option', True),
        ('another_option', False),
    ))
    return Bar(**request.param)
def test_default(foo, bar):
    """Without any parametrization the fixtures expose their declared defaults."""
    assert foo.some_option == 42
    assert foo.another_option == 'test'
    assert bar.some_option is True
    assert bar.another_option is False
@pytest.mark.parametrize(
    ('foo.some_option', 'foo_plus_three',),
    [
        (1, 4),
        (7, 10),
    ],
)
def test_fixture_and_argument(foo, foo_plus_three):
    """A fixture option and a plain test argument can share one parametrize."""
    assert foo.some_option + 3 == foo_plus_three
@pytest.mark.parametrize(
    ('foo.some_option', 'bar.some_option',),
    [
        (5, 5),
        (3, 7),
    ]
)
def test_two_fixtures(foo, bar):
    """Options of two different fixtures can be parametrized together."""
    assert foo.some_option + bar.some_option == 10
@pytest.mark.parametrize(
    'foo.some_option',
    [
        0x420,
    ]
)
@pytest.mark.parametrize(
    'foo.another_option',
    [
        5,
        6,
    ]
)
def test_parametrized_nesting(request, foo):
    """Stacked parametrize marks can target different options of one fixture."""
    assert foo.some_option == 0x420
    assert foo.another_option in (5, 6)
@pytest.mark.parametrize(
    'foo.*',
    [
        dict(some_option=0x420),
    ]
)
def test_indirect(request, foo):
    """The 'fixture.*' form passes a whole dict of options to the fixture."""
    assert foo.some_option == 0x420
@pytest.mark.parametrize(
    ('foo.some_option', 'qux', 'bar.another_option'),
    [
        (0x420, 'qux', 5),
    ]
)
def test_parametrized_mixed(foo, bar, qux):
    """Fixture options and a plain argument can be mixed in one parameter set."""
    assert foo.some_option == 0x420
    assert bar.another_option == 5
    assert qux == 'qux'
@pytest.mark.foo(some_option=24, another_option='five')
def test_shortcut(foo, bar):
    """The mark-named-after-the-fixture shortcut sets options; others default."""
    assert foo.some_option == 24
    assert foo.another_option == 'five'
    assert bar.some_option is True
    assert bar.another_option is False
@pytest.mark.parametrize('foo.some_option', [3])
@pytest.mark.parametrize('foo.some_option', [1])
@pytest.mark.parametrize('foo.some_option', [2])
def test_closest(foo):
    """When marks repeat, the parametrize written closest to the function wins."""
    assert foo.some_option == 2
@pytest.mark.foo(some_option=3)
@pytest.mark.foo(some_option=1)
@pytest.mark.foo(some_option=2)
def test_closest_shortcut(foo):
    """The shortcut form obeys the same closest-mark-wins rule."""
    assert foo.some_option == 2
| 20.155172 | 55 | 0.641574 | from namedlist import namedlist
import pytest
@pytest.fixture(indirect=True)
def foo(request):
Foo = namedlist('Foo', (
('some_option', 42),
('another_option', 'test'),
))
return Foo(**request.param)
@pytest.fixture(indirect=True)
def bar(request):
Bar = namedlist('Bar', (
('some_option', True),
('another_option', False),
))
return Bar(**request.param)
def test_default(foo, bar):
assert foo.some_option == 42
assert foo.another_option == 'test'
assert bar.some_option is True
assert bar.another_option is False
@pytest.mark.parametrize(
('foo.some_option', 'foo_plus_three',),
[
(1, 4),
(7, 10),
],
)
def test_fixture_and_argument(foo, foo_plus_three):
assert foo.some_option + 3 == foo_plus_three
@pytest.mark.parametrize(
('foo.some_option', 'bar.some_option',),
[
(5, 5),
(3, 7),
]
)
def test_two_fixtures(foo, bar):
assert foo.some_option + bar.some_option == 10
@pytest.mark.parametrize(
'foo.some_option',
[
0x420,
]
)
@pytest.mark.parametrize(
'foo.another_option',
[
5,
6,
]
)
def test_parametrized_nesting(request, foo):
assert foo.some_option == 0x420
assert foo.another_option in (5, 6)
@pytest.mark.parametrize(
'foo.*',
[
dict(some_option=0x420),
]
)
def test_indirect(request, foo):
assert foo.some_option == 0x420
@pytest.mark.parametrize(
('foo.some_option', 'qux', 'bar.another_option'),
[
(0x420, 'qux', 5),
]
)
def test_parametrized_mixed(foo, bar, qux):
assert foo.some_option == 0x420
assert bar.another_option == 5
assert qux == 'qux'
@pytest.mark.foo(some_option=24, another_option='five')
def test_shortcut(foo, bar):
assert foo.some_option == 24
assert foo.another_option == 'five'
assert bar.some_option is True
assert bar.another_option is False
@pytest.mark.parametrize('foo.some_option', [3])
@pytest.mark.parametrize('foo.some_option', [1])
@pytest.mark.parametrize('foo.some_option', [2])
def test_closest(foo):
assert foo.some_option == 2
@pytest.mark.foo(some_option=3)
@pytest.mark.foo(some_option=1)
@pytest.mark.foo(some_option=2)
def test_closest_shortcut(foo):
assert foo.some_option == 2
| true | true |
f72fc0b2e52b6be3a20c325a24aba237a4e6319d | 1,372 | py | Python | bigtable/hello_happybase/main_test.py | thesugar/python-docs-samples | 1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0 | [
"Apache-2.0"
] | 34 | 2020-07-27T19:14:01.000Z | 2022-03-31T14:46:53.000Z | bigtable/hello_happybase/main_test.py | thesugar/python-docs-samples | 1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0 | [
"Apache-2.0"
] | 254 | 2020-01-31T23:44:06.000Z | 2022-03-23T22:52:49.000Z | bigtable/hello_happybase/main_test.py | thesugar/python-docs-samples | 1a59ca688f1d7602d52cd4088fa7b6e3afe0afd0 | [
"Apache-2.0"
] | 30 | 2020-01-31T20:45:34.000Z | 2022-03-23T19:56:42.000Z | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from main import main
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
TABLE_NAME_FORMAT = 'hello-world-hb-test-{}'
TABLE_NAME_RANGE = 10000
def test_main(capsys):
    """Run the Bigtable hello-world sample end to end and verify its output."""
    table_name = TABLE_NAME_FORMAT.format(
        random.randrange(TABLE_NAME_RANGE))
    main(
        PROJECT,
        BIGTABLE_INSTANCE,
        table_name)

    out, _ = capsys.readouterr()
    expected_snippets = (
        'Creating the {} table.'.format(table_name),
        'Writing some greetings to the table.',
        'Getting a single greeting by row key.',
        'Hello World!',
        'Scanning for all greetings',
        'Hello Cloud Bigtable!',
        'Deleting the {} table.'.format(table_name),
    )
    for snippet in expected_snippets:
        assert snippet in out
| 32.666667 | 74 | 0.729592 |
import os
import random
from main import main
PROJECT = os.environ['GOOGLE_CLOUD_PROJECT']
BIGTABLE_INSTANCE = os.environ['BIGTABLE_INSTANCE']
TABLE_NAME_FORMAT = 'hello-world-hb-test-{}'
TABLE_NAME_RANGE = 10000
def test_main(capsys):
table_name = TABLE_NAME_FORMAT.format(
random.randrange(TABLE_NAME_RANGE))
main(
PROJECT,
BIGTABLE_INSTANCE,
table_name)
out, _ = capsys.readouterr()
assert 'Creating the {} table.'.format(table_name) in out
assert 'Writing some greetings to the table.' in out
assert 'Getting a single greeting by row key.' in out
assert 'Hello World!' in out
assert 'Scanning for all greetings' in out
assert 'Hello Cloud Bigtable!' in out
assert 'Deleting the {} table.'.format(table_name) in out
| true | true |
f72fc11fa59ceffe2e3f49244bef15eddabf9421 | 7,807 | py | Python | cloudcafe/compute/flavors_api/models/flavor.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/flavors_api/models/flavor.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | null | null | null | cloudcafe/compute/flavors_api/models/flavor.py | ProjectMeniscus/cloudcafe | fa8fd796b303f0c5f0d6e98b2b5d01f6ea8fefe9 | [
"Apache-2.0"
] | 1 | 2020-11-17T19:05:08.000Z | 2020-11-17T19:05:08.000Z | """
Copyright 2013 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import xml.etree.ElementTree as ET
from cafe.engine.models.base import AutoMarshallingModel
from cloudcafe.compute.common.equality_tools import EqualityTools
from cloudcafe.compute.common.constants import Constants
from cloudcafe.compute.common.models.link import Links
class CreateFlavor(AutoMarshallingModel):
    """Request model for creating a flavor via the compute API.

    Serializes to the JSON body expected by the flavor-create endpoint;
    XML serialization is not supported.
    """

    def __init__(self, name=None, ram=None, vcpus=None,
                 disk=None, id=None, is_public=None):
        super(CreateFlavor, self).__init__()
        self.id = id
        self.name = name
        self.ram = ram
        self.disk = disk
        self.vcpus = vcpus
        self.is_public = is_public

    def _obj_to_json(self):
        """Return the request body as a JSON string wrapped in 'flavor'."""
        ret = {'flavor': self._obj_to_dict()}
        return json.dumps(ret)

    def _obj_to_dict(self):
        """Return the request body as a dict; numeric fields are coerced."""
        return {
            'id': self.id,
            'name': self.name,
            'ram': int(self.ram),
            'disk': int(self.disk),
            'vcpus': int(self.vcpus),
            'os-flavor-access:is_public': self.is_public,
        }

    @classmethod
    def _xml_to_obj(cls, serialized_str):
        # Bug fix: `raise NotImplemented` raised a TypeError because
        # NotImplemented is a singleton, not an exception class.
        raise NotImplementedError

    @classmethod
    def _xml_list_to_obj(cls, xml_list):
        raise NotImplementedError
class Flavor(AutoMarshallingModel):
    """Deserialization model for a compute flavor (JSON and XML responses)."""

    def __init__(self, id=None, name=None, ram=None, disk=None, vcpus=None,
                 swap=None, rxtx_factor=None, links=None):
        """
        An object that represents a flavor.
        """
        # NOTE(review): `swap` and `rxtx_factor` are accepted here but never
        # stored on the instance — confirm whether that is intentional.
        self.id = id
        self.name = name
        self.ram = ram
        self.disk = disk
        self.vcpus = vcpus
        self.links = links

    def __repr__(self):
        # Render every attribute as "name: value" inside square brackets.
        values = []
        for prop in self.__dict__:
            values.append("%s: %s" % (prop, self.__dict__[prop]))
        return '[' + ', '.join(values) + ']'

    @classmethod
    def _json_to_obj(cls, serialized_str):
        """
        Returns an instance of a Flavor (or a list of Flavors for a 'flavors'
        payload) based on the json serialized_str passed in.
        """
        # NOTE(review): implicitly returns None when neither 'flavor' nor
        # 'flavors' is present — confirm callers handle that.
        json_dict = json.loads(serialized_str)
        if 'flavor' in json_dict.keys():
            flavor = cls._dict_to_obj(json_dict['flavor'])
            return flavor
        if 'flavors' in json_dict.keys():
            flavors = []
            for flavor_dict in json_dict['flavors']:
                flavor = cls._dict_to_obj(flavor_dict)
                flavors.append(flavor)
            return flavors

    @classmethod
    def _dict_to_obj(cls, flavor_dict):
        """Helper method to turn dictionary into Flavor instance."""
        flavor = Flavor(id=flavor_dict.get('id'),
                        name=flavor_dict.get('name'),
                        ram=flavor_dict.get('ram'),
                        disk=flavor_dict.get('disk'),
                        vcpus=flavor_dict.get('vcpus'))
        flavor.links = Links._dict_to_obj(flavor_dict['links'])
        return flavor

    @classmethod
    def _xml_to_obj(cls, serialized_str):
        """
        Returns an instance of a Flavor (or a list of Flavors for a 'flavors'
        element) based on the xml serialized_str passed in.
        """
        element = ET.fromstring(serialized_str)
        # Strip API and Atom namespaces so tag names can be matched directly.
        cls._remove_xml_etree_namespace(element, Constants.XML_API_NAMESPACE)
        cls._remove_xml_etree_namespace(element,
                                        Constants.XML_API_ATOM_NAMESPACE)
        if element.tag == 'flavor':
            flavor = cls._xml_ele_to_obj(element)
            return flavor
        if element.tag == 'flavors':
            flavors = []
            for flavor in element.findall('flavor'):
                flavor = cls._xml_ele_to_obj(flavor)
                flavors.append(flavor)
            return flavors

    @classmethod
    def _xml_ele_to_obj(cls, element):
        """Helper method to turn ElementTree instance to Flavor instance."""
        flavor_dict = element.attrib
        # XML attributes arrive as strings; coerce numeric fields, leaving
        # missing/empty values untouched (the `and` short-circuits on falsy).
        if 'vcpus' in flavor_dict:
            flavor_dict['vcpus'] = (flavor_dict.get('vcpus') and
                                    int(flavor_dict.get('vcpus')))
        if 'disk' in flavor_dict:
            flavor_dict['disk'] = (flavor_dict.get('disk') and
                                   int(flavor_dict.get('disk')))
        if 'rxtx_factor' in flavor_dict:
            flavor_dict['rxtx_factor'] = flavor_dict.get('rxtx_factor') \
                and float(flavor_dict.get('rxtx_factor'))
        if 'ram' in flavor_dict:
            flavor_dict['ram'] = flavor_dict.get('ram') \
                and int(flavor_dict.get('ram'))
        if 'swap' in flavor_dict:
            flavor_dict['swap'] = flavor_dict.get('swap') \
                and int(flavor_dict.get('swap'))
        links = Links._xml_ele_to_obj(element)
        flavor = Flavor(flavor_dict.get('id'), flavor_dict.get('name'),
                        flavor_dict.get('ram'), flavor_dict.get('disk'),
                        flavor_dict.get('vcpus'), flavor_dict.get('swap'),
                        flavor_dict.get('rxtx_factor'), links)
        return flavor

    def __eq__(self, other):
        """
        @summary: Overrides the default equals
        @param other: Flavor object to compare with
        @type other: Flavor
        @return: True if Flavor objects are equal, False otherwise
        @rtype: bool
        """
        # Links are excluded from the comparison.
        return EqualityTools.are_objects_equal(self, other, ['links'])

    def __ne__(self, other):
        """
        @summary: Overrides the default not-equals
        @param other: Flavor object to compare with
        @type other: Flavor
        @return: True if Flavor objects are not equal, False otherwise
        @rtype: bool
        """
        return not self == other
class FlavorMin(Flavor):
    """
    @summary: Represents minimum details of a flavor (id, name and links)
    """

    def __init__(self, **kwargs):
        """Flavor Min has only id, name and links"""
        # Accept arbitrary keyword arguments and set them as attributes.
        for keys, values in kwargs.items():
            setattr(self, keys, values)

    def __eq__(self, other):
        """
        @summary: Overrides the default equals
        @param other: FlavorMin object to compare with
        @type other: FlavorMin
        @return: True if FlavorMin objects are equal, False otherwise
        @rtype: bool
        """
        # Links are excluded from the comparison.
        return EqualityTools.are_objects_equal(self, other, ['links'])

    def __ne__(self, other):
        """
        @summary: Overrides the default not-equals
        @param other: FlavorMin object to compare with
        @type other: FlavorMin
        @return: True if FlavorMin objects are not equal, False otherwise
        @rtype: bool
        """
        return not self == other

    @classmethod
    def _xml_ele_to_obj(cls, element):
        """Helper method to turn ElementTree instance to FlavorMin instance."""
        flavor_dict = element.attrib
        flavor_min = FlavorMin(id=flavor_dict.get('id'),
                               name=flavor_dict.get('name'))
        flavor_min.links = Links._xml_ele_to_obj(element)
        return flavor_min

    @classmethod
    def _dict_to_obj(cls, flavor_dict):
        """Helper method to turn dictionary into FlavorMin instance."""
        flavor_min = FlavorMin(id=flavor_dict.get('id'),
                               name=flavor_dict.get('name'))
        flavor_min.links = Links._dict_to_obj(flavor_dict['links'])
        return flavor_min
| 34.39207 | 77 | 0.599846 |
import json
import xml.etree.ElementTree as ET
from cafe.engine.models.base import AutoMarshallingModel
from cloudcafe.compute.common.equality_tools import EqualityTools
from cloudcafe.compute.common.constants import Constants
from cloudcafe.compute.common.models.link import Links
class CreateFlavor(AutoMarshallingModel):
def __init__(self, name=None, ram=None, vcpus=None,
disk=None, id=None, is_public=None):
super(CreateFlavor, self).__init__()
self.id = id
self.name = name
self.ram = ram
self.disk = disk
self.vcpus = vcpus
self.is_public = is_public
def _obj_to_json(self):
ret = {'flavor': self._obj_to_dict()}
return json.dumps(ret)
def _obj_to_dict(self):
ret = {}
ret['id'] = self.id
ret['name'] = self.name
ret['ram'] = int(self.ram)
ret['disk'] = int(self.disk)
ret['vcpus'] = int(self.vcpus)
ret['os-flavor-access:is_public'] = self.is_public
return ret
@classmethod
def _xml_to_obj(cls, serialized_str):
raise NotImplemented
@classmethod
def _xml_list_to_obj(cls, xml_list):
raise NotImplemented
class Flavor(AutoMarshallingModel):
def __init__(self, id=None, name=None, ram=None, disk=None, vcpus=None,
swap=None, rxtx_factor=None, links=None):
self.id = id
self.name = name
self.ram = ram
self.disk = disk
self.vcpus = vcpus
self.links = links
def __repr__(self):
values = []
for prop in self.__dict__:
values.append("%s: %s" % (prop, self.__dict__[prop]))
return '[' + ', '.join(values) + ']'
@classmethod
def _json_to_obj(cls, serialized_str):
json_dict = json.loads(serialized_str)
if 'flavor' in json_dict.keys():
flavor = cls._dict_to_obj(json_dict['flavor'])
return flavor
if 'flavors' in json_dict.keys():
flavors = []
for flavor_dict in json_dict['flavors']:
flavor = cls._dict_to_obj(flavor_dict)
flavors.append(flavor)
return flavors
@classmethod
def _dict_to_obj(cls, flavor_dict):
flavor = Flavor(id=flavor_dict.get('id'),
name=flavor_dict.get('name'),
ram=flavor_dict.get('ram'),
disk=flavor_dict.get('disk'),
vcpus=flavor_dict.get('vcpus'))
flavor.links = Links._dict_to_obj(flavor_dict['links'])
return flavor
@classmethod
def _xml_to_obj(cls, serialized_str):
element = ET.fromstring(serialized_str)
cls._remove_xml_etree_namespace(element, Constants.XML_API_NAMESPACE)
cls._remove_xml_etree_namespace(element,
Constants.XML_API_ATOM_NAMESPACE)
if element.tag == 'flavor':
flavor = cls._xml_ele_to_obj(element)
return flavor
if element.tag == 'flavors':
flavors = []
for flavor in element.findall('flavor'):
flavor = cls._xml_ele_to_obj(flavor)
flavors.append(flavor)
return flavors
@classmethod
def _xml_ele_to_obj(cls, element):
flavor_dict = element.attrib
if 'vcpus' in flavor_dict:
flavor_dict['vcpus'] = (flavor_dict.get('vcpus') and
int(flavor_dict.get('vcpus')))
if 'disk' in flavor_dict:
flavor_dict['disk'] = (flavor_dict.get('disk') and
int(flavor_dict.get('disk')))
if 'rxtx_factor' in flavor_dict:
flavor_dict['rxtx_factor'] = flavor_dict.get('rxtx_factor') \
and float(flavor_dict.get('rxtx_factor'))
if 'ram' in flavor_dict:
flavor_dict['ram'] = flavor_dict.get('ram') \
and int(flavor_dict.get('ram'))
if 'swap' in flavor_dict:
flavor_dict['swap'] = flavor_dict.get('swap') \
and int(flavor_dict.get('swap'))
links = Links._xml_ele_to_obj(element)
flavor = Flavor(flavor_dict.get('id'), flavor_dict.get('name'),
flavor_dict.get('ram'), flavor_dict.get('disk'),
flavor_dict.get('vcpus'), flavor_dict.get('swap'),
flavor_dict.get('rxtx_factor'), links)
return flavor
def __eq__(self, other):
return EqualityTools.are_objects_equal(self, other, ['links'])
def __ne__(self, other):
return not self == other
class FlavorMin(Flavor):
def __init__(self, **kwargs):
for keys, values in kwargs.items():
setattr(self, keys, values)
def __eq__(self, other):
return EqualityTools.are_objects_equal(self, other, ['links'])
def __ne__(self, other):
return not self == other
@classmethod
def _xml_ele_to_obj(cls, element):
flavor_dict = element.attrib
flavor_min = FlavorMin(id=flavor_dict.get('id'),
name=flavor_dict.get('name'))
flavor_min.links = Links._xml_ele_to_obj(element)
return flavor_min
@classmethod
def _dict_to_obj(cls, flavor_dict):
flavor_min = FlavorMin(id=flavor_dict.get('id'),
name=flavor_dict.get('name'))
flavor_min.links = Links._dict_to_obj(flavor_dict['links'])
return flavor_min
| true | true |
f72fc1298f0c5130bd5594ba286c250a4a144484 | 524 | py | Python | build/srslib_test/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/srslib_test/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/srslib_test/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kalyco/mfp_workspace/src/srslib_test/include".split(';') if "/home/kalyco/mfp_workspace/src/srslib_test/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsrslib_test".split(';') if "-lsrslib_test" != "" else []
PROJECT_NAME = "srslib_test"
PROJECT_SPACE_DIR = "/home/kalyco/mfp_workspace/devel/.private/srslib_test"
PROJECT_VERSION = "1.0.0"
| 58.222222 | 167 | 0.757634 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kalyco/mfp_workspace/src/srslib_test/include".split(';') if "/home/kalyco/mfp_workspace/src/srslib_test/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lsrslib_test".split(';') if "-lsrslib_test" != "" else []
PROJECT_NAME = "srslib_test"
PROJECT_SPACE_DIR = "/home/kalyco/mfp_workspace/devel/.private/srslib_test"
PROJECT_VERSION = "1.0.0"
| true | true |
f72fc212dd0b3eb11cf3285fa9470daba40b1324 | 9,865 | py | Python | web3/providers/eth_tester/middleware.py | ayushkumar63123/web3.py | 4dda2db9d27a409f1a9c2b4a8ec917b53c51383f | [
"MIT"
] | 1 | 2022-03-19T03:49:34.000Z | 2022-03-19T03:49:34.000Z | web3/providers/eth_tester/middleware.py | ayushkumar63123/web3.py | 4dda2db9d27a409f1a9c2b4a8ec917b53c51383f | [
"MIT"
] | null | null | null | web3/providers/eth_tester/middleware.py | ayushkumar63123/web3.py | 4dda2db9d27a409f1a9c2b4a8ec917b53c51383f | [
"MIT"
] | 1 | 2021-11-12T00:38:42.000Z | 2021-11-12T00:38:42.000Z | import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
)
from eth_typing import (
ChecksumAddress,
)
from eth_utils import (
is_dict,
is_hex,
is_string,
)
from eth_utils.curried import (
apply_formatter_if,
apply_formatters_to_dict,
)
from eth_utils.toolz import (
assoc,
complement,
compose,
curry,
identity,
partial,
pipe,
)
from web3._utils.formatters import (
apply_formatter_to_array,
apply_formatters_to_args,
apply_key_map,
hex_to_integer,
integer_to_hex,
is_array_of_dicts,
static_return,
)
from web3.middleware import (
construct_formatting_middleware,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
TxParams,
)
if TYPE_CHECKING:
from web3 import ( # noqa: F401
Web3,
)
def is_named_block(value: Any) -> bool:
    """Return True if *value* is one of the symbolic block identifiers."""
    named_blocks = ("latest", "earliest", "pending")
    return value in named_blocks
def is_hexstr(value: Any) -> bool:
    """Return True when *value* is a string holding hexadecimal content."""
    if not is_string(value):
        return False
    return is_hex(value)
to_integer_if_hex = apply_formatter_if(is_hexstr, hex_to_integer)
is_not_named_block = complement(is_named_block)
TRANSACTION_KEY_MAPPINGS = {
'access_list': 'accessList',
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'gas_price': 'gasPrice',
'max_fee_per_gas': 'maxFeePerGas',
'max_priority_fee_per_gas': 'maxPriorityFeePerGas',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
transaction_key_remapper = apply_key_map(TRANSACTION_KEY_MAPPINGS)
LOG_KEY_MAPPINGS = {
'log_index': 'logIndex',
'transaction_index': 'transactionIndex',
'transaction_hash': 'transactionHash',
'block_hash': 'blockHash',
'block_number': 'blockNumber',
}
log_key_remapper = apply_key_map(LOG_KEY_MAPPINGS)
RECEIPT_KEY_MAPPINGS = {
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'contract_address': 'contractAddress',
'gas_used': 'gasUsed',
'cumulative_gas_used': 'cumulativeGasUsed',
'effective_gas_price': 'effectiveGasPrice',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
receipt_key_remapper = apply_key_map(RECEIPT_KEY_MAPPINGS)
BLOCK_KEY_MAPPINGS = {
'gas_limit': 'gasLimit',
'sha3_uncles': 'sha3Uncles',
'transactions_root': 'transactionsRoot',
'parent_hash': 'parentHash',
'bloom': 'logsBloom',
'state_root': 'stateRoot',
'receipt_root': 'receiptsRoot',
'total_difficulty': 'totalDifficulty',
'extra_data': 'extraData',
'gas_used': 'gasUsed',
'base_fee_per_gas': 'baseFeePerGas',
}
block_key_remapper = apply_key_map(BLOCK_KEY_MAPPINGS)
TRANSACTION_PARAMS_MAPPING = {
'gasPrice': 'gas_price',
'maxFeePerGas': 'max_fee_per_gas',
'maxPriorityFeePerGas': 'max_priority_fee_per_gas',
'accessList': 'access_list',
}
transaction_params_remapper = apply_key_map(TRANSACTION_PARAMS_MAPPING)
REQUEST_TRANSACTION_FORMATTERS = {
'gas': to_integer_if_hex,
'gasPrice': to_integer_if_hex,
'value': to_integer_if_hex,
'nonce': to_integer_if_hex,
'maxFeePerGas': to_integer_if_hex,
'maxPriorityFeePerGas': to_integer_if_hex,
}
request_transaction_formatter = apply_formatters_to_dict(REQUEST_TRANSACTION_FORMATTERS)
FILTER_PARAMS_MAPPINGS = {
'fromBlock': 'from_block',
'toBlock': 'to_block',
}
filter_params_remapper = apply_key_map(FILTER_PARAMS_MAPPINGS)
FILTER_PARAMS_FORMATTERS = {
'fromBlock': to_integer_if_hex,
'toBlock': to_integer_if_hex,
}
filter_params_formatter = apply_formatters_to_dict(FILTER_PARAMS_FORMATTERS)
filter_params_transformer = compose(filter_params_remapper, filter_params_formatter)
RESPONSE_TRANSACTION_FORMATTERS = {
'to': apply_formatter_if(partial(operator.eq, ''), static_return(None)),
}
response_transaction_formatter = apply_formatters_to_dict(RESPONSE_TRANSACTION_FORMATTERS)
RECEIPT_FORMATTERS = {
'logs': apply_formatter_to_array(log_key_remapper),
}
receipt_formatter = apply_formatters_to_dict(RECEIPT_FORMATTERS)
transaction_params_transformer = compose(transaction_params_remapper, request_transaction_formatter)
ethereum_tester_middleware = construct_formatting_middleware(
request_formatters={
# Eth
RPCEndpoint('eth_getBlockByNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getFilterChanges'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getFilterLogs'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getBlockTransactionCountByNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getUncleCountByBlockNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getTransactionByBlockHashAndIndex'): apply_formatters_to_args(
identity,
to_integer_if_hex,
),
RPCEndpoint('eth_getTransactionByBlockNumberAndIndex'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
RPCEndpoint('eth_getUncleByBlockNumberAndIndex'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
RPCEndpoint('eth_newFilter'): apply_formatters_to_args(
filter_params_transformer,
),
RPCEndpoint('eth_getLogs'): apply_formatters_to_args(
filter_params_transformer,
),
RPCEndpoint('eth_sendTransaction'): apply_formatters_to_args(
transaction_params_transformer,
),
RPCEndpoint('eth_estimateGas'): apply_formatters_to_args(
transaction_params_transformer,
),
RPCEndpoint('eth_call'): apply_formatters_to_args(
transaction_params_transformer,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_uninstallFilter'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getCode'): apply_formatters_to_args(
identity,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getBalance'): apply_formatters_to_args(
identity,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
# EVM
RPCEndpoint('evm_revert'): apply_formatters_to_args(hex_to_integer),
# Personal
RPCEndpoint('personal_sendTransaction'): apply_formatters_to_args(
transaction_params_transformer,
identity,
),
},
result_formatters={
RPCEndpoint('eth_getBlockByHash'): apply_formatter_if(
is_dict,
block_key_remapper,
),
RPCEndpoint('eth_getBlockByNumber'): apply_formatter_if(
is_dict,
block_key_remapper,
),
RPCEndpoint('eth_getBlockTransactionCountByHash'): apply_formatter_if(
is_dict,
transaction_key_remapper,
),
RPCEndpoint('eth_getBlockTransactionCountByNumber'): apply_formatter_if(
is_dict,
transaction_key_remapper,
),
RPCEndpoint('eth_getTransactionByHash'): apply_formatter_if(
is_dict,
compose(transaction_key_remapper, response_transaction_formatter),
),
RPCEndpoint('eth_getTransactionReceipt'): apply_formatter_if(
is_dict,
compose(receipt_key_remapper, receipt_formatter),
),
RPCEndpoint('eth_newFilter'): integer_to_hex,
RPCEndpoint('eth_newBlockFilter'): integer_to_hex,
RPCEndpoint('eth_newPendingTransactionFilter'): integer_to_hex,
RPCEndpoint('eth_getLogs'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
RPCEndpoint('eth_getFilterChanges'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
RPCEndpoint('eth_getFilterLogs'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
# EVM
RPCEndpoint('evm_snapshot'): integer_to_hex,
},
)
def guess_from(web3: "Web3", _: TxParams) -> ChecksumAddress:
    """Guess a default ``from`` address for a transaction.

    Prefers the node's coinbase and falls back to the first local account.

    :param web3: the Web3 instance used to look up coinbase/accounts
    :param _: the transaction being filled (unused)
    :return: a checksum address, or None when no account can be guessed
        (callers treat None as "leave the field unfilled")
    """
    coinbase = web3.eth.coinbase
    if coinbase is not None:
        return coinbase
    try:
        return web3.eth.accounts[0]
    except (IndexError, KeyError):
        # Bug fix: an empty ``accounts`` list raises IndexError, which the
        # previous ``except KeyError`` never caught; KeyError is kept for
        # providers that expose accounts as a mapping.
        # no accounts available to pre-fill, carry on
        pass
    return None
@curry
def fill_default(
    field: str, guess_func: Callable[..., Any], web3: "Web3", transaction: TxParams
) -> TxParams:
    """Return *transaction* with *field* populated via *guess_func* if unset.

    Curried: partially applying ``field``/``guess_func``/``web3`` yields a
    one-argument transaction formatter.
    """
    # type ignored b/c TxParams keys must be string literal types
    already_set = field in transaction and transaction[field] is not None  # type: ignore
    if already_set:
        return transaction
    return assoc(transaction, field, guess_func(web3, transaction))
def default_transaction_fields_middleware(
    make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
    """Middleware that fills a default 'from' address on transaction RPCs."""
    fill_default_from = fill_default('from', guess_from, web3)
    transaction_methods = frozenset([
        'eth_call',
        'eth_estimateGas',
        'eth_sendTransaction',
    ])

    def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
        # Only transaction-shaped calls carry a params[0] dict to fill.
        if method not in transaction_methods:
            return make_request(method, params)
        filled_transaction = fill_default_from(params[0])
        remaining_params = list(params)[1:]
        return make_request(method, [filled_transaction] + remaining_params)

    return middleware
| 31.119874 | 100 | 0.693259 | import operator
from typing import (
TYPE_CHECKING,
Any,
Callable,
)
from eth_typing import (
ChecksumAddress,
)
from eth_utils import (
is_dict,
is_hex,
is_string,
)
from eth_utils.curried import (
apply_formatter_if,
apply_formatters_to_dict,
)
from eth_utils.toolz import (
assoc,
complement,
compose,
curry,
identity,
partial,
pipe,
)
from web3._utils.formatters import (
apply_formatter_to_array,
apply_formatters_to_args,
apply_key_map,
hex_to_integer,
integer_to_hex,
is_array_of_dicts,
static_return,
)
from web3.middleware import (
construct_formatting_middleware,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
TxParams,
)
if TYPE_CHECKING:
from web3 import (
Web3,
)
def is_named_block(value: Any) -> bool:
return value in {"latest", "earliest", "pending"}
def is_hexstr(value: Any) -> bool:
return is_string(value) and is_hex(value)
to_integer_if_hex = apply_formatter_if(is_hexstr, hex_to_integer)
is_not_named_block = complement(is_named_block)
TRANSACTION_KEY_MAPPINGS = {
'access_list': 'accessList',
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'gas_price': 'gasPrice',
'max_fee_per_gas': 'maxFeePerGas',
'max_priority_fee_per_gas': 'maxPriorityFeePerGas',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
transaction_key_remapper = apply_key_map(TRANSACTION_KEY_MAPPINGS)
LOG_KEY_MAPPINGS = {
'log_index': 'logIndex',
'transaction_index': 'transactionIndex',
'transaction_hash': 'transactionHash',
'block_hash': 'blockHash',
'block_number': 'blockNumber',
}
log_key_remapper = apply_key_map(LOG_KEY_MAPPINGS)
RECEIPT_KEY_MAPPINGS = {
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'contract_address': 'contractAddress',
'gas_used': 'gasUsed',
'cumulative_gas_used': 'cumulativeGasUsed',
'effective_gas_price': 'effectiveGasPrice',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
receipt_key_remapper = apply_key_map(RECEIPT_KEY_MAPPINGS)
BLOCK_KEY_MAPPINGS = {
'gas_limit': 'gasLimit',
'sha3_uncles': 'sha3Uncles',
'transactions_root': 'transactionsRoot',
'parent_hash': 'parentHash',
'bloom': 'logsBloom',
'state_root': 'stateRoot',
'receipt_root': 'receiptsRoot',
'total_difficulty': 'totalDifficulty',
'extra_data': 'extraData',
'gas_used': 'gasUsed',
'base_fee_per_gas': 'baseFeePerGas',
}
block_key_remapper = apply_key_map(BLOCK_KEY_MAPPINGS)
TRANSACTION_PARAMS_MAPPING = {
'gasPrice': 'gas_price',
'maxFeePerGas': 'max_fee_per_gas',
'maxPriorityFeePerGas': 'max_priority_fee_per_gas',
'accessList': 'access_list',
}
transaction_params_remapper = apply_key_map(TRANSACTION_PARAMS_MAPPING)
REQUEST_TRANSACTION_FORMATTERS = {
'gas': to_integer_if_hex,
'gasPrice': to_integer_if_hex,
'value': to_integer_if_hex,
'nonce': to_integer_if_hex,
'maxFeePerGas': to_integer_if_hex,
'maxPriorityFeePerGas': to_integer_if_hex,
}
request_transaction_formatter = apply_formatters_to_dict(REQUEST_TRANSACTION_FORMATTERS)
FILTER_PARAMS_MAPPINGS = {
'fromBlock': 'from_block',
'toBlock': 'to_block',
}
filter_params_remapper = apply_key_map(FILTER_PARAMS_MAPPINGS)
FILTER_PARAMS_FORMATTERS = {
'fromBlock': to_integer_if_hex,
'toBlock': to_integer_if_hex,
}
filter_params_formatter = apply_formatters_to_dict(FILTER_PARAMS_FORMATTERS)
filter_params_transformer = compose(filter_params_remapper, filter_params_formatter)
RESPONSE_TRANSACTION_FORMATTERS = {
'to': apply_formatter_if(partial(operator.eq, ''), static_return(None)),
}
response_transaction_formatter = apply_formatters_to_dict(RESPONSE_TRANSACTION_FORMATTERS)
RECEIPT_FORMATTERS = {
'logs': apply_formatter_to_array(log_key_remapper),
}
receipt_formatter = apply_formatters_to_dict(RECEIPT_FORMATTERS)
transaction_params_transformer = compose(transaction_params_remapper, request_transaction_formatter)
ethereum_tester_middleware = construct_formatting_middleware(
request_formatters={
RPCEndpoint('eth_getBlockByNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getFilterChanges'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getFilterLogs'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getBlockTransactionCountByNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getUncleCountByBlockNumber'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getTransactionByBlockHashAndIndex'): apply_formatters_to_args(
identity,
to_integer_if_hex,
),
RPCEndpoint('eth_getTransactionByBlockNumberAndIndex'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
RPCEndpoint('eth_getUncleByBlockNumberAndIndex'): apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
RPCEndpoint('eth_newFilter'): apply_formatters_to_args(
filter_params_transformer,
),
RPCEndpoint('eth_getLogs'): apply_formatters_to_args(
filter_params_transformer,
),
RPCEndpoint('eth_sendTransaction'): apply_formatters_to_args(
transaction_params_transformer,
),
RPCEndpoint('eth_estimateGas'): apply_formatters_to_args(
transaction_params_transformer,
),
RPCEndpoint('eth_call'): apply_formatters_to_args(
transaction_params_transformer,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_uninstallFilter'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('eth_getCode'): apply_formatters_to_args(
identity,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('eth_getBalance'): apply_formatters_to_args(
identity,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
RPCEndpoint('evm_revert'): apply_formatters_to_args(hex_to_integer),
RPCEndpoint('personal_sendTransaction'): apply_formatters_to_args(
transaction_params_transformer,
identity,
),
},
result_formatters={
RPCEndpoint('eth_getBlockByHash'): apply_formatter_if(
is_dict,
block_key_remapper,
),
RPCEndpoint('eth_getBlockByNumber'): apply_formatter_if(
is_dict,
block_key_remapper,
),
RPCEndpoint('eth_getBlockTransactionCountByHash'): apply_formatter_if(
is_dict,
transaction_key_remapper,
),
RPCEndpoint('eth_getBlockTransactionCountByNumber'): apply_formatter_if(
is_dict,
transaction_key_remapper,
),
RPCEndpoint('eth_getTransactionByHash'): apply_formatter_if(
is_dict,
compose(transaction_key_remapper, response_transaction_formatter),
),
RPCEndpoint('eth_getTransactionReceipt'): apply_formatter_if(
is_dict,
compose(receipt_key_remapper, receipt_formatter),
),
RPCEndpoint('eth_newFilter'): integer_to_hex,
RPCEndpoint('eth_newBlockFilter'): integer_to_hex,
RPCEndpoint('eth_newPendingTransactionFilter'): integer_to_hex,
RPCEndpoint('eth_getLogs'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
RPCEndpoint('eth_getFilterChanges'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
RPCEndpoint('eth_getFilterLogs'): apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
RPCEndpoint('evm_snapshot'): integer_to_hex,
},
)
def guess_from(web3: "Web3", _: TxParams) -> ChecksumAddress:
    """Guess a default ``from`` address: coinbase, else first local account.

    Returns None when neither is available.
    """
    coinbase = web3.eth.coinbase
    if coinbase is not None:
        return coinbase
    try:
        return web3.eth.accounts[0]
    except (IndexError, KeyError):
        # Bug fix: indexing an empty ``accounts`` list raises IndexError,
        # which the previous ``except KeyError`` never caught.
        pass
    return None
@curry
def fill_default(
field: str, guess_func: Callable[..., Any], web3: "Web3", transaction: TxParams
) -> TxParams:
if field in transaction and transaction[field] is not None:
return transaction
else:
guess_val = guess_func(web3, transaction)
return assoc(transaction, field, guess_val)
def default_transaction_fields_middleware(
make_request: Callable[[RPCEndpoint, Any], Any], web3: "Web3"
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
fill_default_from = fill_default('from', guess_from, web3)
def middleware(method: RPCEndpoint, params: Any) -> RPCResponse:
if method in (
'eth_call',
'eth_estimateGas',
'eth_sendTransaction',
):
filled_transaction = pipe(
params[0],
fill_default_from,
)
return make_request(method, [filled_transaction] + list(params)[1:])
else:
return make_request(method, params)
return middleware
| true | true |
f72fc26e54686c0677dd432b4718786ee33861af | 188 | py | Python | toal/annotators/WebAnnotators.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | 4 | 2020-10-23T14:42:30.000Z | 2021-06-10T13:29:04.000Z | toal/annotators/WebAnnotators.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | null | null | null | toal/annotators/WebAnnotators.py | Bhaskers-Blu-Org1/text-oriented-active-learning | facfb40673a59e43391b7bdb508e612dff1988d9 | [
"MIT"
] | 1 | 2020-07-30T10:35:09.000Z | 2020-07-30T10:35:09.000Z | from .AbstractAnnotator import AbstractAnnotator
class WebAnnotator(AbstractAnnotator):
def annotate(self, unlab_index, unlabeled_x, unlabeled_y):
raise NotImplementedError() | 31.333333 | 62 | 0.797872 | from .AbstractAnnotator import AbstractAnnotator
class WebAnnotator(AbstractAnnotator):
def annotate(self, unlab_index, unlabeled_x, unlabeled_y):
raise NotImplementedError() | true | true |
f72fc3c32c354207da9306ce6997164be7d90d1b | 9,544 | py | Python | venv/lib/python3.8/site-packages/awscli/customizations/eks/kubeconfig.py | sr9dc/DS_Systems_Project_2 | 0b348c1dd300756f732b4ce13e04239036dc601a | [
"MIT"
] | 4 | 2022-01-07T13:37:33.000Z | 2022-03-31T03:21:17.000Z | venv/lib/python3.8/site-packages/awscli/customizations/eks/kubeconfig.py | sr9dc/DS_Systems_Project_2 | 0b348c1dd300756f732b4ce13e04239036dc601a | [
"MIT"
] | 1 | 2022-01-27T04:21:58.000Z | 2022-01-27T04:21:58.000Z | venv/lib/python3.8/site-packages/awscli/customizations/eks/kubeconfig.py | sr9dc/DS_Systems_Project_2 | 0b348c1dd300756f732b4ce13e04239036dc601a | [
"MIT"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import yaml
import logging
import errno
from botocore.compat import OrderedDict
from awscli.customizations.eks.exceptions import EKSError
from awscli.customizations.eks.ordered_yaml import (ordered_yaml_load,
ordered_yaml_dump)
class KubeconfigError(EKSError):
""" Base class for all kubeconfig errors."""
class KubeconfigCorruptedError(KubeconfigError):
""" Raised when a kubeconfig cannot be parsed."""
class KubeconfigInaccessableError(KubeconfigError):
""" Raised when a kubeconfig cannot be opened for read/writing."""
def _get_new_kubeconfig_content():
return OrderedDict([
("apiVersion", "v1"),
("clusters", []),
("contexts", []),
("current-context", ""),
("kind", "Config"),
("preferences", OrderedDict()),
("users", [])
])
class Kubeconfig(object):
    """In-memory representation of a kubeconfig file."""

    def __init__(self, path, content=None):
        """
        :param path: filesystem location this config belongs to
        :param content: parsed kubeconfig content; a fresh skeleton is
            generated when omitted
        """
        self.path = path
        if content is None:
            content = _get_new_kubeconfig_content()
        self.content = content

    def dump_content(self):
        """Serialize the stored content to YAML, preserving key order."""
        return ordered_yaml_dump(self.content)

    def has_cluster(self, name):
        """Return True if this kubeconfig has a cluster entry named *name*."""
        if 'clusters' not in self.content:
            return False
        return any(name == cluster['name']
                   for cluster in self.content['clusters'])
class KubeconfigValidator(object):
    """Validates that parsed kubeconfig content has the expected shape."""

    def __init__(self):
        # An empty Kubeconfig serves as a schema: the type of each value
        # there is the type the corresponding entry must have.
        self._validation_content = Kubeconfig(None, None).content

    def validate_config(self, config):
        """
        Raise KubeconfigCorruptedError if *config* is structurally invalid.

        :param config: The config to validate
        :type config: Kubeconfig
        """
        if not isinstance(config, Kubeconfig):
            raise KubeconfigCorruptedError("Internal error: "
                                           "Not a Kubeconfig object.")
        self._validate_config_types(config)
        self._validate_list_entry_types(config)

    def _validate_config_types(self, config):
        """Raise KubeconfigCorruptedError when a top-level entry has the wrong type."""
        if not isinstance(config.content, dict):
            raise KubeconfigCorruptedError("Content not a dictionary.")
        for key, expected in self._validation_content.items():
            if key not in config.content:
                continue
            actual = config.content[key]
            # None is tolerated anywhere (an empty YAML section parses as None).
            if actual is None:
                continue
            if not isinstance(actual, type(expected)):
                raise KubeconfigCorruptedError(
                    "{0} is wrong type:{1} "
                    "(Should be {2})".format(
                        key,
                        type(actual),
                        type(expected)
                    )
                )

    def _validate_list_entry_types(self, config):
        """Raise KubeconfigCorruptedError when a list contains a non-dict element."""
        for key in self._validation_content:
            if key not in config.content:
                continue
            if type(config.content[key]) != list:
                continue
            for element in config.content[key]:
                if not isinstance(element, OrderedDict):
                    raise KubeconfigCorruptedError(
                        "Entry in {0} not a dictionary.".format(key))
class KubeconfigLoader(object):
    """Loads kubeconfig files from disk and validates them."""

    def __init__(self, validator=None):
        self._validator = validator if validator is not None else KubeconfigValidator()

    def load_kubeconfig(self, path):
        """
        Load the kubeconfig found at *path*.

        A missing file is not an error: a new, empty kubeconfig is
        produced so it can later be written back.

        :param path: The path to load a kubeconfig from
        :type path: string
        :raises KubeconfigInaccessableError: if the kubeconfig can't be opened
        :raises KubeconfigCorruptedError: if the kubeconfig is invalid
        :return: The loaded kubeconfig
        :rtype: Kubeconfig
        """
        loaded_content = None
        try:
            with open(path, "r") as stream:
                loaded_content = ordered_yaml_load(stream)
        except IOError as e:
            # ENOENT means "start fresh"; anything else (permissions, ...)
            # is a genuine failure.
            if e.errno != errno.ENOENT:
                raise KubeconfigInaccessableError(
                    "Can't open kubeconfig for reading: {0}".format(e))
        except yaml.YAMLError as e:
            raise KubeconfigCorruptedError(
                "YamlError while loading kubeconfig: {0}".format(e))

        loaded_config = Kubeconfig(path, loaded_content)
        self._validator.validate_config(loaded_config)
        return loaded_config
class KubeconfigWriter(object):
    """Writes Kubeconfig objects back to disk."""

    def write_kubeconfig(self, config):
        """
        Write *config* to disk, creating its parent directory if needed.
        OK if the file doesn't exist yet.

        :param config: The kubeconfig to write
        :type config: Kubeconfig
        :raises KubeconfigInaccessableError: if the kubeconfig
        can't be opened for writing
        """
        directory = os.path.dirname(config.path)
        # Bug fix: a bare filename has no directory component, and
        # os.makedirs("") raises FileNotFoundError, which was wrongly
        # reported as a KubeconfigInaccessableError. Only create the
        # directory when there is one.
        if directory:
            try:
                os.makedirs(directory)
            except OSError as e:
                # EEXIST just means the directory is already there.
                if e.errno != errno.EEXIST:
                    raise KubeconfigInaccessableError(
                        "Can't create directory for writing: {0}".format(e))
        try:
            # Open with mode 0600: kubeconfigs carry credentials.
            with os.fdopen(
                    os.open(
                        config.path,
                        os.O_CREAT | os.O_RDWR | os.O_TRUNC,
                        0o600),
                    "w+") as stream:
                ordered_yaml_dump(config.content, stream)
        except (IOError, OSError) as e:
            raise KubeconfigInaccessableError(
                "Can't open kubeconfig for writing: {0}".format(e))
class KubeconfigAppender(object):
    """Inserts and links entries (clusters, users, contexts) in a kubeconfig."""

    def insert_entry(self, config, key, entry):
        """
        Insert *entry* into the list at ``config.content[key]``,
        overwriting any existing entry that shares its name.

        :param config: The kubeconfig to insert an entry into
        :type config: Kubeconfig
        """
        config.content.setdefault(key, [])
        array = config.content[key]
        if not isinstance(array, list):
            raise KubeconfigError("Tried to insert into {0},"
                                  "which is a {1} "
                                  "not a {2}".format(key,
                                                     type(array),
                                                     list))
        found = False
        for index, existing in enumerate(array):
            same_name = ("name" in existing and
                         "name" in entry and
                         existing["name"] == entry["name"])
            if same_name:
                array[index] = entry
                found = True
        if not found:
            array.append(entry)
        config.content[key] = array
        return config

    def _make_context(self, cluster, user, alias=None):
        """Build a context entry tying *cluster* to *user*, named *alias*."""
        context_body = OrderedDict()
        context_body["cluster"] = cluster["name"]
        context_body["user"] = user["name"]
        context = OrderedDict()
        context["context"] = context_body
        context["name"] = alias or user["name"]
        return context

    def insert_cluster_user_pair(self, config, cluster, user, alias=None):
        """
        Insert *cluster* and *user*, link them with a new context,
        and make that context current.

        :param config: the Kubeconfig to insert the pair into
        :type config: Kubeconfig
        :param cluster: the cluster entry
        :type cluster: OrderedDict
        :param user: the user entry
        :type user: OrderedDict
        :param alias: the alias for the context; defaults to the user entry name
        :type alias: str
        :return: The generated context
        :rtype: OrderedDict
        """
        context = self._make_context(cluster, user, alias=alias)
        for section, entry in (("clusters", cluster),
                               ("users", user),
                               ("contexts", context)):
            self.insert_entry(config, section, entry)
        config.content["current-context"] = context["name"]
        return context
| 34.454874 | 83 | 0.585813 |
import os
import yaml
import logging
import errno
from botocore.compat import OrderedDict
from awscli.customizations.eks.exceptions import EKSError
from awscli.customizations.eks.ordered_yaml import (ordered_yaml_load,
ordered_yaml_dump)
class KubeconfigError(EKSError):
class KubeconfigCorruptedError(KubeconfigError):
class KubeconfigInaccessableError(KubeconfigError):
def _get_new_kubeconfig_content():
return OrderedDict([
("apiVersion", "v1"),
("clusters", []),
("contexts", []),
("current-context", ""),
("kind", "Config"),
("preferences", OrderedDict()),
("users", [])
])
class Kubeconfig(object):
def __init__(self, path, content=None):
self.path = path
if content is None:
content = _get_new_kubeconfig_content()
self.content = content
def dump_content(self):
return ordered_yaml_dump(self.content)
def has_cluster(self, name):
if 'clusters' not in self.content:
return False
return name in [cluster['name']
for cluster in self.content['clusters']]
class KubeconfigValidator(object):
def __init__(self):
self._validation_content = Kubeconfig(None, None).content
def validate_config(self, config):
if not isinstance(config, Kubeconfig):
raise KubeconfigCorruptedError("Internal error: "
"Not a Kubeconfig object.")
self._validate_config_types(config)
self._validate_list_entry_types(config)
def _validate_config_types(self, config):
if not isinstance(config.content, dict):
raise KubeconfigCorruptedError("Content not a dictionary.")
for key, value in self._validation_content.items():
if (key in config.content and
config.content[key] is not None and
not isinstance(config.content[key], type(value))):
raise KubeconfigCorruptedError(
"{0} is wrong type:{1} "
"(Should be {2})".format(
key,
type(config.content[key]),
type(value)
)
)
def _validate_list_entry_types(self, config):
for key, value in self._validation_content.items():
if (key in config.content and
type(config.content[key]) == list):
for element in config.content[key]:
if not isinstance(element, OrderedDict):
raise KubeconfigCorruptedError(
"Entry in {0} not a dictionary.".format(key))
class KubeconfigLoader(object):
def __init__(self, validator=None):
if validator is None:
validator = KubeconfigValidator()
self._validator = validator
def load_kubeconfig(self, path):
try:
with open(path, "r") as stream:
loaded_content = ordered_yaml_load(stream)
except IOError as e:
if e.errno == errno.ENOENT:
loaded_content = None
else:
raise KubeconfigInaccessableError(
"Can't open kubeconfig for reading: {0}".format(e))
except yaml.YAMLError as e:
raise KubeconfigCorruptedError(
"YamlError while loading kubeconfig: {0}".format(e))
loaded_config = Kubeconfig(path, loaded_content)
self._validator.validate_config(loaded_config)
return loaded_config
class KubeconfigWriter(object):
def write_kubeconfig(self, config):
directory = os.path.dirname(config.path)
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise KubeconfigInaccessableError(
"Can't create directory for writing: {0}".format(e))
try:
with os.fdopen(
os.open(
config.path,
os.O_CREAT | os.O_RDWR | os.O_TRUNC,
0o600),
"w+") as stream:
ordered_yaml_dump(config.content, stream)
except (IOError, OSError) as e:
raise KubeconfigInaccessableError(
"Can't open kubeconfig for writing: {0}".format(e))
class KubeconfigAppender(object):
def insert_entry(self, config, key, entry):
if key not in config.content:
config.content[key] = []
array = config.content[key]
if not isinstance(array, list):
raise KubeconfigError("Tried to insert into {0},"
"which is a {1} "
"not a {2}".format(key,
type(array),
list))
found = False
for counter, existing_entry in enumerate(array):
if "name" in existing_entry and\
"name" in entry and\
existing_entry["name"] == entry["name"]:
array[counter] = entry
found = True
if not found:
array.append(entry)
config.content[key] = array
return config
def _make_context(self, cluster, user, alias=None):
return OrderedDict([
("context", OrderedDict([
("cluster", cluster["name"]),
("user", user["name"])
])),
("name", alias or user["name"])
])
def insert_cluster_user_pair(self, config, cluster, user, alias=None):
context = self._make_context(cluster, user, alias=alias)
self.insert_entry(config, "clusters", cluster)
self.insert_entry(config, "users", user)
self.insert_entry(config, "contexts", context)
config.content["current-context"] = context["name"]
return context
| true | true |
f72fc4014791e9cb00ad25357fa03b020d005be5 | 8,006 | py | Python | mxdc/devices/cryojet.py | michel4j/mxdc | 844f0854cc696553c8a51f8e9b5b06a8e4345261 | [
"BSD-3-Clause"
] | 2 | 2018-10-23T19:05:40.000Z | 2021-03-18T20:06:32.000Z | mxdc/devices/cryojet.py | michel4j/mxdc | 844f0854cc696553c8a51f8e9b5b06a8e4345261 | [
"BSD-3-Clause"
] | null | null | null | mxdc/devices/cryojet.py | michel4j/mxdc | 844f0854cc696553c8a51f8e9b5b06a8e4345261 | [
"BSD-3-Clause"
] | null | null | null | from enum import Enum
from gi.repository import GLib
from zope.interface import implementer
import mxdc.devices.shutter
from mxdc import Device, Signal, Property
from mxdc.devices import misc
from mxdc.utils.log import get_module_logger
from .interfaces import ICryostat
logger = get_module_logger(__name__)
class CryoJetNozzle(mxdc.devices.shutter.EPICSShutter):
    """
    A specialized in-out actuator for pneumatic Cryojet nozzles.

    :param name: The process variable name of the devices
    """

    def __init__(self, name):
        # Derive the open/close command and state PVs from the base PV name.
        open_name = '{}:opr:open'.format(name)
        close_name = '{}:opr:close'.format(name)
        state_name = '{}:out'.format(name)
        super().__init__(open_name, close_name, state_name)
        self._messages = ['Restoring', 'Retracting']
        self._name = 'Cryojet Nozzle'
@implementer(ICryostat)
class CryostatBase(Device):
    """
    Base class for all cryostat devices. A cryostat maintains low temperatures at the sample position.

    Signals:
        - temp (float,): Sample temperature
        - level (float,): Cryogen level
        - sample (float,): Cryogen flow-rate
        - shield (float,): Shield flow-rate
    """

    # Nozzle positions; IN=0, OUT=1.
    class Positions(Enum):
        IN, OUT = range(2)

    # Signal declarations picked up by the Device base class.
    class Signals:
        temp = Signal('temp', arg_types=(float,))
        level = Signal('level', arg_types=(float,))
        sample = Signal('sample', arg_types=(float,))
        shield = Signal('shield', arg_types=(float,))
        pos = Signal('position', arg_types=(object,))

    # Properties
    # Feedback values mirroring the signals above; all default to 0.0.
    temperature = Property(type=float, default=0.0)
    shield = Property(type=float, default=0.0)
    sample = Property(type=float, default=0.0)
    level = Property(type=float, default=0.0)

    # NOTE(review): configure/stop/start are intentional no-ops here
    # (docstring-only bodies); concrete cryostats override them.
    def configure(self, temp=None, sample=None, shield=None, position=None):
        """
        Configure the Cryostat.

        :param temp: Set the target sample temperature
        :param sample: Set the sample flow rate
        :param shield: Set the shield flow rate
        :param position: If the cryostat set the position. Should be one of Positions.IN, Positions.OUT
        """

    def stop(self):
        """
        Stop the cryostat
        """

    def start(self):
        """
        Start the cryostat
        """
@implementer(ICryostat)
class CryoJetBase(Device):
    """
    Cryogenic Nozzle Jet Device
    """
    # Feedback properties updated by the on_* callbacks below.
    temperature = Property(type=float, default=0.0)
    shield = Property(type=float, default=0.0)
    sample = Property(type=float, default=0.0)
    level = Property(type=float, default=0.0)

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.name = 'Cryojet'
        self._previous_flow = 7.0
        self.setup(*args, **kwargs)

    def setup(self, *args, **kwargs):
        """Hook for subclasses to connect their process variables."""
        pass

    def anneal(self, duration):
        """
        Anneal for the specified duration

        :param duration: duration in seconds to stop cooling
        """
        pass

    def on_temp(self, obj, val):
        """Translate a temperature reading into a health state."""
        if val < 110:
            health = (0, 'temp', '')
        elif val < 115:
            health = (2, 'temp', 'Temp. high!')
        else:
            health = (4, 'temp', 'Temp. too high!')
        self.set_state(health=health)
        self.set_property('temperature', val)

    def on_sample(self, obj, val):
        """Translate the sample flow rate into a health state."""
        if val > 5:
            health = (0, 'sample', '')
        elif val > 4:
            health = (2, 'sample', 'Sample Flow Low!')
        else:
            health = (4, 'sample', 'Sample Flow Too Low!')
        self.set_state(health=health)
        self.set_property('sample', val)

    def on_shield(self, obj, val):
        """Translate the shield flow rate into a health state."""
        if val > 5:
            health = (0, 'shield', '')
        elif val > 4:
            health = (2, 'shield', 'Shield Flow Low!')
        else:
            health = (4, 'shield', 'Shield Flow Too Low!')
        self.set_state(health=health)
        self.set_property('shield', val)

    def on_level(self, obj, val):
        """Translate the cryogen level into a health state."""
        if val < 15:
            health = (4, 'cryo', 'Cryogen too low!')
        elif val < 20:
            health = (2, 'cryo', 'Cryogen low!')
        else:
            health = (0, 'cryo', '')
        self.set_state(health=health)
        self.set_property('level', val)

    def on_nozzle(self, obj, val):
        """Report whether the nozzle is retracted (truthy val) or restored."""
        if val:
            self.set_state(health=(1, 'nozzle', 'Retracted!'))
        else:
            self.set_state(health=(0, 'nozzle', 'Restored'))
class CryoJet(CryoJetBase):
    """
    Classic Cryojet: process variables for sensor temperature, sample and
    shield flow, plus a separate level controller and a retractable nozzle.
    """

    def setup(self, name, level_name, nozzle_name):
        """
        Create the process variables and connect the monitoring handlers.

        :param name: base process variable name of the cryojet controller
        :param level_name: base PV name of the cryogen level controller
        :param nozzle_name: base PV name of the nozzle actuator
        """
        self.temp_fbk = self.add_pv('{}:sensorTemp:get'.format(name))
        self.sample_fbk = self.add_pv('{}:SampleFlow:get'.format(name))
        self.shield_fbk = self.add_pv('{}:ShieldFlow:get'.format(name))
        self.sample_sp = self.add_pv('{}:sampleFlow:set'.format(name))
        self.level_fbk = self.add_pv('{}:ch1LVL:get'.format(level_name))
        self.fill_status = self.add_pv('{}:status:ch1:N.SVAL'.format(level_name))
        self.nozzle = CryoJetNozzle(nozzle_name)

        # connect signals for monitoring state
        self.temp_fbk.connect('changed', self.on_temp)
        self.level_fbk.connect('changed', self.on_level)
        self.sample_fbk.connect('changed', self.on_sample)
        # FIX: on_shield was previously connected to sample_fbk (copy-paste
        # error), so shield flow changes were never monitored; connect the
        # shield feedback PV instead, matching CryoJet5.setup.
        self.shield_fbk.connect('changed', self.on_shield)
        self.nozzle.connect('changed', self.on_nozzle)

    def on_level(self, obj, val):
        # This level PV reports values at 10x the base-class scale (thresholds
        # 150/200 vs 15/20), so the stored property is val/10.
        if val < 150:
            self.set_state(health=(4, 'cryo', 'Cryogen too low!'))
        elif val < 200:
            self.set_state(health=(2, 'cryo', 'Cryogen low!'))
        else:
            self.set_state(health=(0, 'cryo', ''))
        self.set_property('level', val / 10.)

    def anneal(self, duration):
        """
        Anneal for the specified duration

        :param duration: duration in seconds to stop cooling
        """
        previous_flow = self.sample_fbk.get()
        # Stop the sample flow immediately, then restore the previous flow
        # once the requested duration elapses (GLib timeout is milliseconds).
        self.sample_sp.put(0.0)
        GLib.timeout_add(duration * 1000, self.sample_sp.put, previous_flow)
class CryoJet5(CryoJetBase):
    """
    Cryojet model with unified controller PVs: temperature, sample/shield
    flow, autofill level and status, plus a retractable nozzle.
    """

    def setup(self, name, nozzle_name):
        # Feedback (read-back) process variables.
        self.temp_fbk = self.add_pv('{}:sample:temp:fbk'.format(name))
        self.sample_fbk = self.add_pv('{}:sample:flow:fbk'.format(name))
        self.shield_fbk = self.add_pv('{}:shield:flow:fbk'.format(name))
        # Sample flow setpoint PV.
        # NOTE(review): sample_sp is created here but anneal() is not
        # overridden, so the inherited no-op anneal never uses it — confirm
        # whether CryoJet5 should get its own anneal implementation.
        self.sample_sp = self.add_pv('{}:sample:flow'.format(name))
        self.level_fbk = self.add_pv('{}:autofill:level:fbk'.format(name))
        self.fill_status = self.add_pv('{}:autofill:state'.format(name))
        self.nozzle = CryoJetNozzle(nozzle_name)

        # connect signals for monitoring state
        self.temp_fbk.connect('changed', self.on_temp)
        self.level_fbk.connect('changed', self.on_level)
        self.sample_fbk.connect('changed', self.on_sample)
        self.shield_fbk.connect('changed', self.on_shield)
        self.nozzle.connect('changed', self.on_nozzle)
class SimCryoJet(CryoJetBase):
    """
    Simulated cryojet for use when no hardware is available: noisy simulated
    positioners stand in for the temperature, flow and level PVs.
    """

    def setup(self, *args, **kwargs):
        self.nozzle = mxdc.devices.shutter.SimShutter('Sim Cryo Nozzle')
        self.temp_fbk = misc.SimPositioner('Cryo Temperature', pos=102.5, noise=3)
        self.sample_fbk = misc.SimPositioner('Cryo Sample flow', pos=6.5, noise=1)
        self.shield_fbk = misc.SimPositioner('Cryo Shield flow', pos=9.5, noise=1)
        self.level_fbk = misc.SimPositioner('Cryo Level', pos=35.5, noise=10)
        self.name = 'Sim CryoJet'

        # connect signals for monitoring state
        self.temp_fbk.connect('changed', self.on_temp)
        self.level_fbk.connect('changed', self.on_level)
        self.sample_fbk.connect('changed', self.on_sample)
        self.shield_fbk.connect('changed', self.on_shield)
        self.nozzle.connect('changed', self.on_nozzle)

    def _simulate_nozzle(self, *args, **kwargs):
        # Toggle the simulated nozzle; returning True keeps the source alive
        # when scheduled via a GLib timeout.
        if self.nozzle.is_open():
            self.nozzle.close()
        else:
            self.nozzle.open()
        return True

    def anneal(self, duration):
        """
        Anneal for the specified duration

        :param duration: duration in seconds to stop cooling
        """
        previous_flow = self.sample_fbk.get()
        # FIX: this class never creates a ``sample_sp`` attribute, so the
        # previous ``self.sample_sp.put(0.0)`` raised AttributeError. Drive
        # the simulated sample-flow positioner instead, matching the restore
        # call scheduled below.
        self.sample_fbk.put(0.0)
        GLib.timeout_add(duration * 1000, self.sample_fbk.put, previous_flow)
# Public API of this module; the simulated device is exported alongside the
# real hardware classes.
__all__ = ['CryoJet', 'CryoJet5', 'SimCryoJet']
| 33.923729 | 103 | 0.62066 | from enum import Enum
from gi.repository import GLib
from zope.interface import implementer
import mxdc.devices.shutter
from mxdc import Device, Signal, Property
from mxdc.devices import misc
from mxdc.utils.log import get_module_logger
from .interfaces import ICryostat
logger = get_module_logger(__name__)
class CryoJetNozzle(mxdc.devices.shutter.EPICSShutter):
def __init__(self, name):
open_name = "%s:opr:open" % name
close_name = "%s:opr:close" % name
state_name = "%s:out" % name
mxdc.devices.shutter.EPICSShutter.__init__(self, open_name, close_name, state_name)
self._messages = ['Restoring', 'Retracting']
self._name = 'Cryojet Nozzle'
@implementer(ICryostat)
class CryostatBase(Device):
class Positions(Enum):
IN, OUT = range(2)
class Signals:
temp = Signal('temp', arg_types=(float,))
level = Signal('level', arg_types=(float,))
sample = Signal('sample', arg_types=(float,))
shield = Signal('shield', arg_types=(float,))
pos = Signal('position', arg_types=(object,))
temperature = Property(type=float, default=0.0)
shield = Property(type=float, default=0.0)
sample = Property(type=float, default=0.0)
level = Property(type=float, default=0.0)
def configure(self, temp=None, sample=None, shield=None, position=None):
def stop(self):
def start(self):
@implementer(ICryostat)
class CryoJetBase(Device):
temperature = Property(type=float, default=0.0)
shield = Property(type=float, default=0.0)
sample = Property(type=float, default=0.0)
level = Property(type=float, default=0.0)
def __init__(self, *args, **kwargs):
super().__init__()
self.name = 'Cryojet'
self._previous_flow = 7.0
self.setup(*args, **kwargs)
def setup(self, *args, **kwargs):
pass
def anneal(self, duration):
pass
def on_temp(self, obj, val):
if val < 110:
self.set_state(health=(0, 'temp', ''))
elif val < 115:
self.set_state(health=(2, 'temp', 'Temp. high!'))
else:
self.set_state(health=(4, 'temp', 'Temp. too high!'))
self.set_property('temperature', val)
def on_sample(self, obj, val):
if val > 5:
self.set_state(health=(0, 'sample', ''))
elif val > 4:
self.set_state(health=(2, 'sample', 'Sample Flow Low!'))
else:
self.set_state(health=(4, 'sample','Sample Flow Too Low!'))
self.set_property('sample', val)
def on_shield(self, obj, val):
if val > 5:
self.set_state(health=(0, 'shield', ''))
elif val > 4:
self.set_state(health=(2, 'shield','Shield Flow Low!'))
else:
self.set_state(health=(4, 'shield','Shield Flow Too Low!'))
self.set_property('shield', val)
def on_level(self, obj, val):
if val < 15:
self.set_state(health=(4, 'cryo','Cryogen too low!'))
elif val < 20:
self.set_state(health=(2, 'cryo','Cryogen low!'))
else:
self.set_state(health=(0, 'cryo', ''))
self.set_property('level', val)
def on_nozzle(self, obj, val):
if val:
self.set_state(health=(1, 'nozzle', 'Retracted!'))
else:
self.set_state(health=(0, 'nozzle', 'Restored'))
class CryoJet(CryoJetBase):
def setup(self, name, level_name, nozzle_name):
self.temp_fbk = self.add_pv('{}:sensorTemp:get'.format(name))
self.sample_fbk = self.add_pv('{}:SampleFlow:get'.format(name))
self.shield_fbk = self.add_pv('{}:ShieldFlow:get'.format(name))
self.sample_sp = self.add_pv('{}:sampleFlow:set'.format(name))
self.level_fbk = self.add_pv('{}:ch1LVL:get'.format(level_name))
self.fill_status = self.add_pv('{}:status:ch1:N.SVAL'.format(level_name))
self.nozzle = CryoJetNozzle(nozzle_name)
self.temp_fbk.connect('changed', self.on_temp)
self.level_fbk.connect('changed', self.on_level)
self.sample_fbk.connect('changed', self.on_sample)
self.sample_fbk.connect('changed', self.on_shield)
self.nozzle.connect('changed', self.on_nozzle)
def on_level(self, obj, val):
if val < 150:
self.set_state(health=(4, 'cryo','Cryogen too low!'))
elif val < 200:
self.set_state(health=(2, 'cryo','Cryogen low!'))
else:
self.set_state(health=(0, 'cryo', ''))
self.set_property('level', val/10.)
def anneal(self, duration):
previous_flow = self.sample_fbk.get()
self.sample_sp.put(0.0)
GLib.timeout_add(duration*1000, self.sample_sp.put, previous_flow)
class CryoJet5(CryoJetBase):
def setup(self, name, nozzle_name):
self.temp_fbk = self.add_pv('{}:sample:temp:fbk'.format(name))
self.sample_fbk = self.add_pv('{}:sample:flow:fbk'.format(name))
self.shield_fbk = self.add_pv('{}:shield:flow:fbk'.format(name))
self.sample_sp = self.add_pv('{}:sample:flow'.format(name))
self.level_fbk = self.add_pv('{}:autofill:level:fbk'.format(name))
self.fill_status = self.add_pv('{}:autofill:state'.format(name))
self.nozzle = CryoJetNozzle(nozzle_name)
self.temp_fbk.connect('changed', self.on_temp)
self.level_fbk.connect('changed', self.on_level)
self.sample_fbk.connect('changed', self.on_sample)
self.shield_fbk.connect('changed', self.on_shield)
self.nozzle.connect('changed', self.on_nozzle)
class SimCryoJet(CryoJetBase):
def setup(self, *args, **kwargs):
self.nozzle = mxdc.devices.shutter.SimShutter('Sim Cryo Nozzle')
self.temp_fbk = misc.SimPositioner('Cryo Temperature', pos=102.5, noise=3)
self.sample_fbk = misc.SimPositioner('Cryo Sample flow', pos=6.5, noise=1)
self.shield_fbk = misc.SimPositioner('Cryo Shield flow', pos=9.5, noise=1)
self.level_fbk = misc.SimPositioner('Cryo Level', pos=35.5, noise=10)
self.name = 'Sim CryoJet'
self.temp_fbk.connect('changed', self.on_temp)
self.level_fbk.connect('changed', self.on_level)
self.sample_fbk.connect('changed', self.on_sample)
self.shield_fbk.connect('changed', self.on_shield)
self.nozzle.connect('changed', self.on_nozzle)
def _simulate_nozzle(self, *args, **kwargs):
if self.nozzle.is_open():
self.nozzle.close()
else:
self.nozzle.open()
return True
def anneal(self, duration):
previous_flow = self.sample_fbk.get()
self.sample_sp.put(0.0)
GLib.timeout_add(duration*1000, self.sample_fbk.put, previous_flow)
__all__ = ['CryoJet', 'CryoJet5', 'SimCryoJet']
| true | true |
f72fc514b8852f9e17acbed322e8818424cb6190 | 59,548 | py | Python | test/azure/Expected/AcceptanceTests/Paging/paging/aio/operations/_paging_operations.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/Paging/paging/aio/operations/_paging_operations.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | test/azure/Expected/AcceptanceTests/Paging/paging/aio/operations/_paging_operations.py | amrElroumy/autorest.python | b37af1779f6d53b4fa0d92da62151f8133006f98 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PagingOperations:
"""PagingOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~paging.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def get_no_item_name_pages(self, **kwargs) -> AsyncIterable["_models.ProductResultValue"]:
        """A paging operation that must return result of the default 'value' node.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResultValue or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResultValue]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResultValue"]
        # Map well-known error status codes to azure-core exception types;
        # callers may extend/override the mapping via ``error_map``.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page targets the
            # operation URL; later pages re-issue against the service-provided
            # ``next_link``, reusing the same headers.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_no_item_name_pages.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Items live under the default 'value' node of the response.
            deserialized = self._deserialize("ProductResultValue", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # (continuation token, items) pair consumed by AsyncItemPaged.
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page, surfacing HTTP failures as exceptions.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_no_item_name_pages.metadata = {"url": "/paging/noitemname"}  # type: ignore
    @distributed_trace
    def get_null_next_link_name_pages(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that must ignore any kind of nextLink, and stop after page 1.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_null_next_link_name_pages.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # Always return None as the continuation token: this operation
            # deliberately ignores any nextLink so paging stops after page 1.
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_null_next_link_name_pages.metadata = {"url": "/paging/nullnextlink"}  # type: ignore
    @distributed_trace
    def get_single_pages(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that finishes on the first call without a nextlink.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page targets the operation URL; any later page re-issues
            # the GET against the service-provided ``next_link``.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_single_pages.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # Paging ends when the response carries no next_link.
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_single_pages.metadata = {"url": "/paging/single"}  # type: ignore
    @distributed_trace
    def first_response_empty(self, **kwargs) -> AsyncIterable["_models.ProductResultValue"]:
        """A paging operation whose first response's items list is empty, but still returns a next link.
        Second (and final) call, will give you an items list of 1.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResultValue or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResultValue]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResultValue"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.first_response_empty.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Items live under the 'value' node; an empty first page is valid
            # as long as a next_link is present to continue paging.
            deserialized = self._deserialize("ProductResultValue", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    first_response_empty.metadata = {"url": "/paging/firstResponseEmpty/1"}  # type: ignore
    @distributed_trace
    def get_multiple_pages(
        self,
        client_request_id: Optional[str] = None,
        paging_get_multiple_pages_options: Optional["_models.PagingGetMultiplePagesOptions"] = None,
        **kwargs
    ) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that includes a nextLink that has 10 pages.

        :param client_request_id:
        :type client_request_id: str
        :param paging_get_multiple_pages_options: Parameter group.
        :type paging_get_multiple_pages_options: ~paging.models.PagingGetMultiplePagesOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # Flatten the optional parameter group into individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_multiple_pages_options is not None:
            _maxresults = paging_get_multiple_pages_options.maxresults
            _timeout = paging_get_multiple_pages_options.timeout
        accept = "application/json"

        def prepare_request(next_link=None):
            # The optional headers below are sent with every page request;
            # only the URL differs between the first and subsequent pages.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_multiple_pages.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # (continuation token, items) pair consumed by AsyncItemPaged.
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages.metadata = {"url": "/paging/multiple"}  # type: ignore
    @distributed_trace
    def get_with_query_params(self, required_query_parameter: int, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that includes a next operation. It has a different query parameter from it's
        next operation nextOperationWithQueryParams. Returns a ProductResult.

        :param required_query_parameter: A required integer query parameter. Put in value '100' to pass
         test.
        :type required_query_parameter: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        query_constant = True
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_with_query_params.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["requiredQueryParameter"] = self._serialize.query(
                    "required_query_parameter", required_query_parameter, "int"
                )
                query_parameters["queryConstant"] = self._serialize.query("query_constant", query_constant, "bool")

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages call a fixed "next operation" URL that
                # takes only queryConstant, not requiredQueryParameter.
                url = "/paging/multiple/nextOperationWithQueryParams"
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["queryConstant"] = self._serialize.query("query_constant", query_constant, "bool")

                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_with_query_params.metadata = {"url": "/paging/multiple/getWithQueryParams"}  # type: ignore
    @distributed_trace
    def get_odata_multiple_pages(
        self,
        client_request_id: Optional[str] = None,
        paging_get_odata_multiple_pages_options: Optional["_models.PagingGetOdataMultiplePagesOptions"] = None,
        **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """A paging operation that includes a nextLink in odata format that has 10 pages.

        :param client_request_id:
        :type client_request_id: str
        :param paging_get_odata_multiple_pages_options: Parameter group.
        :type paging_get_odata_multiple_pages_options: ~paging.models.PagingGetOdataMultiplePagesOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OdataProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.OdataProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.OdataProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # Flatten the optional parameter group into individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_odata_multiple_pages_options is not None:
            _maxresults = paging_get_odata_multiple_pages_options.maxresults
            _timeout = paging_get_odata_multiple_pages_options.timeout
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_odata_multiple_pages.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # The continuation token comes from the odata-style
            # ``odata.nextLink`` field rather than the plain ``nextLink``.
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_odata_multiple_pages.metadata = {"url": "/paging/multiple/odata"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_with_offset(
        self,
        paging_get_multiple_pages_with_offset_options: "_models.PagingGetMultiplePagesWithOffsetOptions",
        client_request_id: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that includes a nextLink that has 10 pages.

        :param paging_get_multiple_pages_with_offset_options: Parameter group.
        :type paging_get_multiple_pages_with_offset_options: ~paging.models.PagingGetMultiplePagesWithOffsetOptions
        :param client_request_id:
        :type client_request_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))

        # Flatten the (required) parameter group: maxresults/timeout become
        # headers, offset is formatted into the URL path.
        _maxresults = None
        _offset = None
        _timeout = None
        if paging_get_multiple_pages_with_offset_options is not None:
            _maxresults = paging_get_multiple_pages_with_offset_options.maxresults
            _offset = paging_get_multiple_pages_with_offset_options.offset
            _timeout = paging_get_multiple_pages_with_offset_options.timeout
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL: the offset is substituted into the path for
                # the first request only; subsequent pages follow the
                # service-provided next_link verbatim.
                url = self.get_multiple_pages_with_offset.metadata["url"]  # type: ignore
                path_format_arguments = {
                    "offset": self._serialize.url("offset", _offset, "int"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_with_offset.metadata = {"url": "/paging/multiple/withpath/{offset}"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_retry_first(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that fails on the first call with 500 and then retries and then get a
        response including a nextLink that has 10 pages.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_multiple_pages_retry_first.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            # (Retries on the initial 500 are handled by the pipeline's retry policy.)
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_retry_first.metadata = {"url": "/paging/multiple/retryfirst"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_retry_second(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that includes a nextLink that has 10 pages, of which the 2nd call fails
        first with 500. The client should retry and finish all 10 pages eventually.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_multiple_pages_retry_second.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_retry_second.metadata = {"url": "/paging/multiple/retrysecond"}  # type: ignore
    @distributed_trace
    def get_single_pages_failure(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that receives a 400 on the first call.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_single_pages_failure.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error
            # (the expected 400 surfaces as HttpResponseError).
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_single_pages_failure.metadata = {"url": "/paging/single/failure"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_failure(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that receives a 400 on the second call.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_multiple_pages_failure.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_failure.metadata = {"url": "/paging/multiple/failure"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_failure_uri(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that receives an invalid nextLink.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim
            # (here the service hands back an invalid one on purpose).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_multiple_pages_failure_uri.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_failure_uri.metadata = {"url": "/paging/multiple/failureuri"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_fragment_next_link(
        self, api_version: str, tenant: str, **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """A paging operation that doesn't return a full URL, just a fragment.

        :param api_version: Sets the api version to use.
        :type api_version: str
        :param tenant: Sets the tenant to use.
        :type tenant: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OdataProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.OdataProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.OdataProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # First page: expand {tenant} in the operation URL.
                # Construct URL
                url = self.get_multiple_pages_fragment_next_link.metadata["url"]  # type: ignore
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", tenant, "str"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["api_version"] = self._serialize.query("api_version", api_version, "str")

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service only returns a URL *fragment*, so it must be spliced into a
                # template rather than followed verbatim; skip_quote keeps it unescaped.
                url = "/paging/multiple/fragment/{tenant}/{nextLink}"
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", tenant, "str"),
                    "nextLink": self._serialize.url("next_link", next_link, "str", skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["api_version"] = self._serialize.query("api_version", api_version, "str")

                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the next link lives in the OData '@odata.nextLink' field.
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_fragment_next_link.metadata = {"url": "/paging/multiple/fragment/{tenant}"}  # type: ignore
    @distributed_trace
    def get_multiple_pages_fragment_with_grouping_next_link(
        self, custom_parameter_group: "_models.CustomParameterGroup", **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """A paging operation that doesn't return a full URL, just a fragment with parameters grouped.

        :param custom_parameter_group: Parameter group.
        :type custom_parameter_group: ~paging.models.CustomParameterGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either OdataProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.OdataProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.OdataProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the parameter group into the individual query/path values it carries.
        _api_version = None
        _tenant = None
        if custom_parameter_group is not None:
            _api_version = custom_parameter_group.api_version
            _tenant = custom_parameter_group.tenant
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # First page: expand {tenant} in the operation URL.
                # Construct URL
                url = self.get_multiple_pages_fragment_with_grouping_next_link.metadata["url"]  # type: ignore
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", _tenant, "str"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["api_version"] = self._serialize.query("api_version", _api_version, "str")

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The service only returns a URL *fragment*; splice it into the template
                # unescaped (skip_quote) instead of following it verbatim.
                url = "/paging/multiple/fragmentwithgrouping/{tenant}/{nextLink}"
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", _tenant, "str"),
                    "nextLink": self._serialize.url("next_link", next_link, "str", skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters["api_version"] = self._serialize.query("api_version", _api_version, "str")

                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; the next link lives in the OData '@odata.nextLink' field.
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_multiple_pages_fragment_with_grouping_next_link.metadata = {"url": "/paging/multiple/fragmentwithgrouping/{tenant}"}  # type: ignore
    async def _get_multiple_pages_lro_initial(
        self,
        client_request_id: Optional[str] = None,
        paging_get_multiple_pages_lro_options: Optional["_models.PagingGetMultiplePagesLroOptions"] = None,
        **kwargs
    ) -> "_models.ProductResult":
        """Issue the initial POST of the long-running paging operation and return its first result.

        Only a 202 status is accepted here; the public ``begin_get_multiple_pages_lro``
        wraps this call in a poller and then pages through the results.
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResult"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the parameter group into the individual header values it carries.
        _maxresults = None
        _timeout = None
        if paging_get_multiple_pages_lro_options is not None:
            _maxresults = paging_get_multiple_pages_lro_options.maxresults
            _timeout = paging_get_multiple_pages_lro_options.timeout
        accept = "application/json"

        # Construct URL
        url = self._get_multiple_pages_lro_initial.metadata["url"]  # type: ignore

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if client_request_id is not None:
            header_parameters["client-request-id"] = self._serialize.header(
                "client_request_id", client_request_id, "str"
            )
        if _maxresults is not None:
            header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
        if _timeout is not None:
            header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
        header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # The LRO initial call is accepted asynchronously, so only 202 is a success.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)

        deserialized = self._deserialize("ProductResult", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _get_multiple_pages_lro_initial.metadata = {"url": "/paging/multiple/lro"}  # type: ignore
@distributed_trace_async
async def begin_get_multiple_pages_lro(
self,
client_request_id: Optional[str] = None,
paging_get_multiple_pages_lro_options: Optional["_models.PagingGetMultiplePagesLroOptions"] = None,
**kwargs
) -> AsyncLROPoller[AsyncItemPaged["_models.ProductResult"]]:
"""A long-running paging operation that includes a nextLink that has 10 pages.
:param client_request_id:
:type client_request_id: str
:param paging_get_multiple_pages_lro_options: Parameter group.
:type paging_get_multiple_pages_lro_options: ~paging.models.PagingGetMultiplePagesLroOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncLROBasePolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns an iterator like instance of either ProductResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
cls = kwargs.pop("cls", None) # type: ClsType["_models.ProductResult"]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
_maxresults = None
_timeout = None
if paging_get_multiple_pages_lro_options is not None:
_maxresults = paging_get_multiple_pages_lro_options.maxresults
_timeout = paging_get_multiple_pages_lro_options.timeout
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if client_request_id is not None:
header_parameters["client-request-id"] = self._serialize.header(
"client_request_id", client_request_id, "str"
)
if _maxresults is not None:
header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
if _timeout is not None:
header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
if not next_link:
# Construct URL
url = self.get_multiple_pages_lro.metadata["url"] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProductResult", pipeline_response)
list_of_elem = deserialized.values
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
polling = kwargs.pop("polling", False) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop("cls", None) # type: ClsType["_models.ProductResult"]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_multiple_pages_lro_initial(
client_request_id=client_request_id,
paging_get_multiple_pages_lro_options=paging_get_multiple_pages_lro_options,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop("error_map", None)
kwargs.pop("content_type", None)
def get_long_running_output(pipeline_response):
async def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
else:
return await get_next(next_link)
return AsyncItemPaged(internal_get_next, extract_data)
if polling is True:
polling_method = AsyncLROBasePolling(lro_delay, **kwargs)
elif polling is False:
polling_method = AsyncNoPolling()
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_multiple_pages_lro.metadata = {"url": "/paging/multiple/lro"} # type: ignore
    @distributed_trace
    def get_paging_model_with_item_name_with_xms_client_name(
        self, **kwargs
    ) -> AsyncIterable["_models.ProductResultValueWithXMSClientName"]:
        """A paging operation that returns a paging model whose item name is is overriden by x-ms-client-
        name 'indexes'.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResultValueWithXMSClientName or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResultValueWithXMSClientName]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)  # type: ClsType["_models.ProductResultValueWithXMSClientName"]
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                # Construct URL
                url = self.get_paging_model_with_item_name_with_xms_client_name.metadata["url"]  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # The item list is exposed as 'indexes' (renamed via x-ms-client-name).
            deserialized = self._deserialize("ProductResultValueWithXMSClientName", pipeline_response)
            list_of_elem = deserialized.indexes
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_paging_model_with_item_name_with_xms_client_name.metadata = {"url": "/paging/itemNameWithXMSClientName"}  # type: ignore
| 47.983884 | 140 | 0.653171 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.polling.async_base_polling import AsyncLROBasePolling
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
# Generic response-handler alias: an optional callable invoked with
# (pipeline response, deserialized value, response headers) to customize results.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PagingOperations:
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers shared by all operations."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get_no_item_name_pages(self, **kwargs) -> AsyncIterable["_models.ProductResultValue"]:
        """A paging operation whose page model carries its items in the default 'value' field.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResultValue or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResultValue]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                url = self.get_no_item_name_pages.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Items live in the default 'value' field (no x-ms-client-name override).
            deserialized = self._deserialize("ProductResultValue", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_no_item_name_pages.metadata = {"url": "/paging/noitemname"}
    @distributed_trace
    def get_null_next_link_name_pages(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation whose page model has no next-link field, so only one page is fetched.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                url = self.get_null_next_link_name_pages.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # Next link is always None here: paging stops after the first page.
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_null_next_link_name_pages.metadata = {"url": "/paging/nullnextlink"}
    @distributed_trace
    def get_single_pages(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """A paging operation that finishes on the first call, returning a single page of results.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ProductResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~paging.models.ProductResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop("cls", None)
        # Map well-known error status codes to typed exceptions; callers may extend via 'error_map'.
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page hits the operation URL; follow-up pages use next_link verbatim.
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")

            if not next_link:
                url = self.get_single_pages.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items for this page).
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Run one page request through the pipeline; anything other than 200 is an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    get_single_pages.metadata = {"url": "/paging/single"}
    @distributed_trace
    def first_response_empty(self, **kwargs) -> AsyncIterable["_models.ProductResultValue"]:
        """Page over ``/paging/firstResponseEmpty/1``, whose first page has an empty value list.

        :return: An iterator-like instance of ProductResultValue elements.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.first_response_empty.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResultValue", pipeline_response)
            # Note: this model exposes its elements as ``value`` (singular),
            # unlike the ProductResult operations which use ``values``.
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    first_response_empty.metadata = {"url": "/paging/firstResponseEmpty/1"}
    @distributed_trace
    def get_multiple_pages(
        self,
        client_request_id: Optional[str] = None,
        paging_get_multiple_pages_options: Optional["_models.PagingGetMultiplePagesOptions"] = None,
        **kwargs
    ) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple``, a multi-page result set.

        :param client_request_id: Optional value sent as the ``client-request-id`` header.
        :param paging_get_multiple_pages_options: Optional parameter group carrying
         ``maxresults`` and ``timeout``, both forwarded as request headers.
        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the optional parameter group into the individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_multiple_pages_options is not None:
            _maxresults = paging_get_multiple_pages_options.maxresults
            _timeout = paging_get_multiple_pages_options.timeout
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages.metadata = {"url": "/paging/multiple"}
    @distributed_trace
    def get_with_query_params(self, required_query_parameter: int, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/getWithQueryParams`` with required query parameters.

        :param required_query_parameter: Integer sent as ``requiredQueryParameter``
         on the first request only.
        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        query_constant = True  # fixed ``queryConstant`` value sent on every request
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_with_query_params.metadata["url"]
                query_parameters = {}
                query_parameters["requiredQueryParameter"] = self._serialize.query(
                    "required_query_parameter", required_query_parameter, "int"
                )
                query_parameters["queryConstant"] = self._serialize.query("query_constant", query_constant, "bool")
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The continuation request ignores the server-provided link and
                # always calls the fixed "next operation" endpoint instead.
                url = "/paging/multiple/nextOperationWithQueryParams"
                query_parameters = {}
                query_parameters["queryConstant"] = self._serialize.query("query_constant", query_constant, "bool")
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_with_query_params.metadata = {"url": "/paging/multiple/getWithQueryParams"}
    @distributed_trace
    def get_odata_multiple_pages(
        self,
        client_request_id: Optional[str] = None,
        paging_get_odata_multiple_pages_options: Optional["_models.PagingGetOdataMultiplePagesOptions"] = None,
        **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """Page over ``/paging/multiple/odata``, which uses an OData-style next link.

        :param client_request_id: Optional value sent as the ``client-request-id`` header.
        :param paging_get_odata_multiple_pages_options: Optional parameter group carrying
         ``maxresults`` and ``timeout``, both forwarded as request headers.
        :return: An iterator-like instance of OdataProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the optional parameter group into the individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_odata_multiple_pages_options is not None:
            _maxresults = paging_get_odata_multiple_pages_options.maxresults
            _timeout = paging_get_odata_multiple_pages_options.timeout
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_odata_multiple_pages.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            # Continuation comes from the OData property (odata.nextLink), not next_link.
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_odata_multiple_pages.metadata = {"url": "/paging/multiple/odata"}
    @distributed_trace
    def get_multiple_pages_with_offset(
        self,
        paging_get_multiple_pages_with_offset_options: "_models.PagingGetMultiplePagesWithOffsetOptions",
        client_request_id: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/withpath/{offset}``, with the offset in the URL path.

        :param paging_get_multiple_pages_with_offset_options: Required parameter group
         carrying ``maxresults``/``timeout`` (headers) and ``offset`` (URL path segment).
        :param client_request_id: Optional value sent as the ``client-request-id`` header.
        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the parameter group into the individual request values.
        _maxresults = None
        _offset = None
        _timeout = None
        if paging_get_multiple_pages_with_offset_options is not None:
            _maxresults = paging_get_multiple_pages_with_offset_options.maxresults
            _offset = paging_get_multiple_pages_with_offset_options.offset
            _timeout = paging_get_multiple_pages_with_offset_options.timeout
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                # Only the first request substitutes {offset}; continuation links
                # from the server are already fully formed.
                url = self.get_multiple_pages_with_offset.metadata["url"]
                path_format_arguments = {
                    "offset": self._serialize.url("offset", _offset, "int"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_with_offset.metadata = {"url": "/paging/multiple/withpath/{offset}"}
    @distributed_trace
    def get_multiple_pages_retry_first(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/retryfirst``; the pipeline's retry policy is
        expected to recover from a transient failure on the first call.

        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_retry_first.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_retry_first.metadata = {"url": "/paging/multiple/retryfirst"}
    @distributed_trace
    def get_multiple_pages_retry_second(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/retrysecond``; the pipeline's retry policy is
        expected to recover from a transient failure on the second page.

        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_retry_second.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_retry_second.metadata = {"url": "/paging/multiple/retrysecond"}
    @distributed_trace
    def get_single_pages_failure(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/single/failure``, an endpoint that fails on the first call
        (non-200 responses surface as :class:`HttpResponseError` during iteration).

        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_single_pages_failure.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_single_pages_failure.metadata = {"url": "/paging/single/failure"}
    @distributed_trace
    def get_multiple_pages_failure(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/failure``, an endpoint whose second request fails
        (non-200 responses surface as :class:`HttpResponseError` during iteration).

        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_failure.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_failure.metadata = {"url": "/paging/multiple/failure"}
    @distributed_trace
    def get_multiple_pages_failure_uri(self, **kwargs) -> AsyncIterable["_models.ProductResult"]:
        """Page over ``/paging/multiple/failureuri``, an endpoint that returns an invalid
        next-link URI (the failure surfaces while iterating the pager).

        :return: An iterator-like instance of ProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_failure_uri.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_failure_uri.metadata = {"url": "/paging/multiple/failureuri"}
    @distributed_trace
    def get_multiple_pages_fragment_next_link(
        self, api_version: str, tenant: str, **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """Page over ``/paging/multiple/fragment/{tenant}`` where the server returns only
        a URL *fragment* as the next link; each continuation re-assembles the full URL.

        :param api_version: API version sent as the ``api_version`` query parameter.
        :param tenant: Tenant identifier substituted into the URL path.
        :return: An iterator-like instance of OdataProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_fragment_next_link.metadata["url"]
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", tenant, "str"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters["api_version"] = self._serialize.query("api_version", api_version, "str")
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # The fragment is embedded as the {nextLink} path segment;
                # skip_quote keeps any characters in the fragment unescaped.
                url = "/paging/multiple/fragment/{tenant}/{nextLink}"
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", tenant, "str"),
                    "nextLink": self._serialize.url("next_link", next_link, "str", skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters["api_version"] = self._serialize.query("api_version", api_version, "str")
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_fragment_next_link.metadata = {"url": "/paging/multiple/fragment/{tenant}"}
    @distributed_trace
    def get_multiple_pages_fragment_with_grouping_next_link(
        self, custom_parameter_group: "_models.CustomParameterGroup", **kwargs
    ) -> AsyncIterable["_models.OdataProductResult"]:
        """Page over ``/paging/multiple/fragmentwithgrouping/{tenant}``; like the
        fragment operation above, but the tenant/api-version come in a parameter group.

        :param custom_parameter_group: Parameter group carrying ``api_version``
         (query parameter) and ``tenant`` (URL path segment).
        :return: An iterator-like instance of OdataProductResult values.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the parameter group into the individual request values.
        _api_version = None
        _tenant = None
        if custom_parameter_group is not None:
            _api_version = custom_parameter_group.api_version
            _tenant = custom_parameter_group.tenant
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_fragment_with_grouping_next_link.metadata["url"]
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", _tenant, "str"),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters["api_version"] = self._serialize.query("api_version", _api_version, "str")
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Server returns a URL fragment; re-embed it as the {nextLink}
                # path segment (skip_quote leaves the fragment unescaped).
                url = "/paging/multiple/fragmentwithgrouping/{tenant}/{nextLink}"
                path_format_arguments = {
                    "tenant": self._serialize.url("tenant", _tenant, "str"),
                    "nextLink": self._serialize.url("next_link", next_link, "str", skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters["api_version"] = self._serialize.query("api_version", _api_version, "str")
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("OdataProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.odata_next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_multiple_pages_fragment_with_grouping_next_link.metadata = {"url": "/paging/multiple/fragmentwithgrouping/{tenant}"}
    async def _get_multiple_pages_lro_initial(
        self,
        client_request_id: Optional[str] = None,
        paging_get_multiple_pages_lro_options: Optional["_models.PagingGetMultiplePagesLroOptions"] = None,
        **kwargs
    ) -> "_models.ProductResult":
        """Issue the initial POST of the long-running paging operation.

        Expects HTTP 202 (Accepted); the deserialized first page is returned so the
        poller in :meth:`begin_get_multiple_pages_lro` can track the operation.

        :param client_request_id: Optional value sent as the ``client-request-id`` header.
        :param paging_get_multiple_pages_lro_options: Optional parameter group carrying
         ``maxresults`` and ``timeout``, both forwarded as request headers.
        :return: The first ProductResult page.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional response transform callback
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the optional parameter group into the individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_multiple_pages_lro_options is not None:
            _maxresults = paging_get_multiple_pages_lro_options.maxresults
            _timeout = paging_get_multiple_pages_lro_options.timeout
        accept = "application/json"
        url = self._get_multiple_pages_lro_initial.metadata["url"]
        query_parameters = {}
        header_parameters = {}
        if client_request_id is not None:
            header_parameters["client-request-id"] = self._serialize.header(
                "client_request_id", client_request_id, "str"
            )
        if _maxresults is not None:
            header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
        if _timeout is not None:
            header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
        header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response)
        deserialized = self._deserialize("ProductResult", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_multiple_pages_lro_initial.metadata = {"url": "/paging/multiple/lro"}
    @distributed_trace_async
    async def begin_get_multiple_pages_lro(
        self,
        client_request_id: Optional[str] = None,
        paging_get_multiple_pages_lro_options: Optional["_models.PagingGetMultiplePagesLroOptions"] = None,
        **kwargs
    ) -> AsyncLROPoller[AsyncItemPaged["_models.ProductResult"]]:
        """Start the long-running ``/paging/multiple/lro`` operation.

        The returned poller's final result is itself an async pager over ProductResult.

        :param client_request_id: Optional value sent as the ``client-request-id`` header.
        :param paging_get_multiple_pages_lro_options: Optional parameter group carrying
         ``maxresults`` and ``timeout``, both forwarded as request headers.
        :keyword polling: True for default polling, False for no polling, or a custom
         polling method object.
        :keyword continuation_token: Resume a previously started operation.
        :return: An AsyncLROPoller whose result is an AsyncItemPaged of ProductResult.
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        # Flatten the optional parameter group into the individual header values.
        _maxresults = None
        _timeout = None
        if paging_get_multiple_pages_lro_options is not None:
            _maxresults = paging_get_multiple_pages_lro_options.maxresults
            _timeout = paging_get_multiple_pages_lro_options.timeout
        accept = "application/json"
        def prepare_request(next_link=None):
            # Initial request is a POST (starts the LRO); continuations are GETs.
            header_parameters = {}
            if client_request_id is not None:
                header_parameters["client-request-id"] = self._serialize.header(
                    "client_request_id", client_request_id, "str"
                )
            if _maxresults is not None:
                header_parameters["maxresults"] = self._serialize.header("maxresults", _maxresults, "int")
            if _timeout is not None:
                header_parameters["timeout"] = self._serialize.header("timeout", _timeout, "int")
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_multiple_pages_lro.metadata["url"]
                query_parameters = {}
                request = self._client.post(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResult", pipeline_response)
            list_of_elem = deserialized.values
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        # NOTE(review): ``polling`` defaults to False (no polling) here; generated
        # Azure SDK code typically defaults this to True — confirm this is intended.
        polling = kwargs.pop("polling", False)
        # NOTE(review): ``cls`` was already popped above, so this second pop always
        # yields None and the closure above sees None — verify this is intended.
        cls = kwargs.pop("cls", None)
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)
        if cont_token is None:
            # Fresh start: issue the initial POST; the identity ``cls`` keeps the
            # raw pipeline response for the poller.
            raw_result = await self._get_multiple_pages_lro_initial(
                client_request_id=client_request_id,
                paging_get_multiple_pages_lro_options=paging_get_multiple_pages_lro_options,
                cls=lambda x, y, z: x,
                **kwargs
            )
        kwargs.pop("error_map", None)
        kwargs.pop("content_type", None)
        def get_long_running_output(pipeline_response):
            # Once the LRO completes, wrap its final response in a pager whose
            # first page is the LRO response itself.
            async def internal_get_next(next_link=None):
                if next_link is None:
                    return pipeline_response
                else:
                    return await get_next(next_link)
            return AsyncItemPaged(internal_get_next, extract_data)
        if polling is True:
            polling_method = AsyncLROBasePolling(lro_delay, **kwargs)
        elif polling is False:
            polling_method = AsyncNoPolling()
        else:
            polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_multiple_pages_lro.metadata = {"url": "/paging/multiple/lro"}
    @distributed_trace
    def get_paging_model_with_item_name_with_xms_client_name(
        self, **kwargs
    ) -> AsyncIterable["_models.ProductResultValueWithXMSClientName"]:
        """Page over ``/paging/itemNameWithXMSClientName``, where the wire item name is
        remapped client-side (the element list lives on the ``indexes`` attribute).

        :return: An iterator-like instance of ProductResultValueWithXMSClientName elements.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop("cls", None)  # optional callable applied to each page's element list
        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop("error_map", {}))
        accept = "application/json"
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters["Accept"] = self._serialize.header("accept", accept, "str")
            if not next_link:
                url = self.get_paging_model_with_item_name_with_xms_client_name.metadata["url"]
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ProductResultValueWithXMSClientName", pipeline_response)
            # x-ms-client-name renames the wire property, so elements come from ``indexes``.
            list_of_elem = deserialized.indexes
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    get_paging_model_with_item_name_with_xms_client_name.metadata = {"url": "/paging/itemNameWithXMSClientName"}
| true | true |
f72fc59c0d562a760978fca715dfabbc90935136 | 791 | pyde | Python | mode/examples/Topics/Create Shapes/PolygonPShape/PolygonPShape.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 1,224 | 2015-01-01T22:09:23.000Z | 2022-03-29T19:43:56.000Z | mode/examples/Topics/Create Shapes/PolygonPShape/PolygonPShape.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 253 | 2015-01-14T03:45:51.000Z | 2022-02-08T01:18:19.000Z | mode/examples/Topics/Create Shapes/PolygonPShape/PolygonPShape.pyde | timgates42/processing.py | 78a237922c2a928b83f4ad579dbf8d32c0099890 | [
"Apache-2.0"
] | 225 | 2015-01-13T18:38:33.000Z | 2022-03-30T20:27:39.000Z |
"""
PrimitivePShape.
Using a PShape to display a custom polygon.
"""
def setup():
    """Build the star polygon once and keep it in the global `star` PShape."""
    global star
    size(640, 360, P2D)
    smooth()
    star = createShape()
    star.beginShape()
    # Fill, stroke and weight are set on the shape itself.
    star.fill(102)
    star.stroke(255)
    star.strokeWeight(2)
    # Outline of a five-pointed star, centred on the origin (same vertex
    # order as before — order matters for the polygon winding).
    outline = [
        (0, -50), (14, -20), (47, -15), (23, 7), (29, 40),
        (0, 25), (-29, 40), (-23, 7), (-47, -15), (-14, -20),
    ]
    for vx, vy in outline:
        star.vertex(vx, vy)
    star.endShape(CLOSE)
def draw():
    """Clear the frame and draw the star at the current mouse position."""
    background(51)
    # We can use translate to move the PShape.
    translate(mouseX, mouseY)
    # Display the shape.
    shape(star)
| 19.775 | 51 | 0.610619 |
def setup():
    """Create the star PShape once at start-up and store it globally."""
    size(640, 360, P2D)
    smooth()
    # The shape is built once here and re-drawn every frame in draw().
    global star
    star = createShape()
    star.beginShape()
    # Fill and stroke styling must be set between beginShape/endShape.
    star.fill(102)
    star.stroke(255)
    star.strokeWeight(2)
    # Hard-coded vertices outlining a five-pointed star.
    star.vertex(0, -50)
    star.vertex(14, -20)
    star.vertex(47, -15)
    star.vertex(23, 7)
    star.vertex(29, 40)
    star.vertex(0, 25)
    star.vertex(-29, 40)
    star.vertex(-23, 7)
    star.vertex(-47, -15)
    star.vertex(-14, -20)
    star.endShape(CLOSE)
def draw():
    """Clear the frame and draw the star at the current mouse position."""
    background(51)
    # translate() shifts the origin so the shape follows the mouse.
    translate(mouseX, mouseY)
    shape(star)
| true | true |
f72fc60681e9156e8be7417e9e0e510cbc1a4913 | 7,388 | py | Python | faucet/faucet_pipeline.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | 3 | 2021-04-07T19:10:12.000Z | 2021-12-30T17:11:14.000Z | faucet/faucet_pipeline.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | 27 | 2019-03-22T03:44:20.000Z | 2020-01-19T16:53:55.000Z | faucet/faucet_pipeline.py | boldsort/faucet | 451fbaa8ebce1822e06615c9da947f1dc7e3e416 | [
"Apache-2.0"
] | 1 | 2019-10-25T22:51:42.000Z | 2019-10-25T22:51:42.000Z | """Standard FAUCET pipeline."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from faucet.faucet_metadata import EGRESS_METADATA_MASK
class ValveTableConfig:  # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """Configuration for a single table."""

    def __init__(self, name, table_id,  # pylint: disable=too-many-arguments
                 exact_match=None, meter=None, output=True, miss_goto=None,
                 size=None, match_types=None, set_fields=None, dec_ttl=None,
                 vlan_scale=None, vlan_port_scale=None,
                 next_tables=None, metadata_match=0, metadata_write=0):
        self.name = name
        self.table_id = table_id
        self.exact_match = exact_match
        self.meter = meter
        self.output = output
        self.miss_goto = miss_goto
        self.size = size
        self.match_types = match_types
        self.set_fields = set_fields
        self.dec_ttl = dec_ttl
        self.vlan_scale = vlan_scale
        self.vlan_port_scale = vlan_port_scale
        self.metadata_match = metadata_match
        self.metadata_write = metadata_write
        # An empty/absent successor list is normalized to an empty tuple;
        # anything truthy must already be an ordered sequence.
        if not next_tables:
            self.next_tables = ()
        else:
            assert isinstance(next_tables, (list, tuple))
            self.next_tables = next_tables

    def __str__(self):
        # Stable, sorted rendering of all truthy attributes; this string
        # also drives hashing, equality and ordering below.
        rendered = ('%s: %s' % (attr, value)
                    for attr, value in sorted(self.__dict__.items())
                    if value)
        return 'table config %s' % ' '.join(rendered)

    def __repr__(self):
        return str(self)

    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __lt__(self, other):
        return hash(self) < hash(other)
# Successor tables reachable after Ethernet destination processing.
_NEXT_ETH = ('eth_dst_hairpin', 'eth_dst', 'flood')
# As above, but virtual-IP (vip) processing may also follow.
_NEXT_VIP = ('vip',) + _NEXT_ETH
def _fib_table(ipv, table_id):
    """Return the ValveTableConfig for an IPv4 or IPv6 FIB table.

    Args:
        ipv (int): IP version (4 or 6); names the table and its dst match.
        table_id (int): OpenFlow table ID to assign.
    """
    return ValveTableConfig(
        'ipv%u_fib' % ipv,
        table_id,
        match_types=(('eth_type', False), ('ipv%u_dst' % ipv, True), ('vlan_vid', False)),
        set_fields=('eth_dst', 'eth_src', 'vlan_vid'),
        dec_ttl=True,
        vlan_port_scale=3.1,
        next_tables=_NEXT_VIP
    )
# Port ACLs are applied first, in table 0.
PORT_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'port_acl',
    0,
    match_types=(('in_port', False),),
    next_tables=(('vlan',) + _NEXT_VIP)
)
VLAN_DEFAULT_CONFIG = ValveTableConfig(
    'vlan',
    PORT_ACL_DEFAULT_CONFIG.table_id + 1,
    match_types=(('eth_dst', True), ('eth_type', False),
                 ('in_port', False), ('vlan_vid', False)),
    set_fields=('vlan_vid',),
    vlan_port_scale=3,
    next_tables=('copro', 'vlan_acl', 'classification', 'eth_src')
)
# NOTE(review): copro and vlan_acl share the same table ID (vlan + 1);
# presumably only one of the two is instantiated in a given pipeline -- confirm.
COPRO_DEFAULT_CONFIG = ValveTableConfig(
    'copro',
    VLAN_DEFAULT_CONFIG.table_id + 1,
    match_types=(('in_port', False), ('eth_type', False), ('vlan_vid', False)),
    vlan_port_scale=1.5,
    miss_goto='eth_dst',
    next_tables=(('eth_dst',)),
)
VLAN_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'vlan_acl',
    VLAN_DEFAULT_CONFIG.table_id + 1,
    next_tables=(('classification', 'eth_src') + _NEXT_ETH))
CLASSIFICATION_DEFAULT_CONFIG = ValveTableConfig(
    'classification',
    VLAN_ACL_DEFAULT_CONFIG.table_id + 1,
    miss_goto='eth_src',
    next_tables=(('eth_src', 'ipv4_fib', 'ipv6_fib') + _NEXT_VIP)
)
ETH_SRC_DEFAULT_CONFIG = ValveTableConfig(
    'eth_src',
    CLASSIFICATION_DEFAULT_CONFIG.table_id + 1,
    miss_goto='eth_dst',
    next_tables=(('ipv4_fib', 'ipv6_fib') + _NEXT_VIP),
    match_types=(('eth_dst', True), ('eth_src', False), ('eth_type', False),
                 ('in_port', False), ('vlan_vid', False)),
    set_fields=('vlan_vid', 'eth_dst'),
    vlan_port_scale=4.1,
)
IPV4_FIB_DEFAULT_CONFIG = _fib_table(4, ETH_SRC_DEFAULT_CONFIG.table_id + 1)
IPV6_FIB_DEFAULT_CONFIG = _fib_table(6, IPV4_FIB_DEFAULT_CONFIG.table_id + 1)
VIP_DEFAULT_CONFIG = ValveTableConfig(
    'vip',
    IPV6_FIB_DEFAULT_CONFIG.table_id + 1,
    match_types=(('arp_tpa', False), ('eth_dst', False), ('eth_type', False),
                 ('icmpv6_type', False), ('ip_proto', False)),
    next_tables=_NEXT_ETH,
    vlan_scale=8,
)
ETH_DST_HAIRPIN_DEFAULT_CONFIG = ValveTableConfig(
    'eth_dst_hairpin',
    VIP_DEFAULT_CONFIG.table_id + 1,
    match_types=(('in_port', False), ('eth_dst', False), ('vlan_vid', False)),
    miss_goto='eth_dst',
    exact_match=True,
    vlan_port_scale=4.1,
)
ETH_DST_DEFAULT_CONFIG = ValveTableConfig(
    'eth_dst',
    ETH_DST_HAIRPIN_DEFAULT_CONFIG.table_id + 1,
    exact_match=True,
    miss_goto='flood',  # Note: when using egress acls the miss goto will be
    # egress acl table
    match_types=(('eth_dst', False), ('vlan_vid', False)),
    next_tables=('egress', 'egress_acl'),
    vlan_port_scale=4.1,
    metadata_write=EGRESS_METADATA_MASK
)
EGRESS_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'egress_acl',
    ETH_DST_DEFAULT_CONFIG.table_id + 1,
    next_tables=('egress',)
)
EGRESS_DEFAULT_CONFIG = ValveTableConfig(
    'egress',
    EGRESS_ACL_DEFAULT_CONFIG.table_id + 1,
    match_types=(('metadata', True), ('vlan_vid', False)),
    vlan_port_scale=1.5,
    next_tables=('flood',),
    miss_goto='flood',
    metadata_match=EGRESS_METADATA_MASK
)
FLOOD_DEFAULT_CONFIG = ValveTableConfig(
    'flood',
    EGRESS_DEFAULT_CONFIG.table_id + 1,
    match_types=(('eth_dst', True), ('in_port', False), ('vlan_vid', False)),
    vlan_port_scale=8.0,
)
# Tables every FAUCET pipeline must provide.
MINIMUM_FAUCET_PIPELINE_TABLES = {
    'vlan', 'eth_src', 'eth_dst', 'flood'}
# TODO: implement an eth_type table before VLAN. This would enable interception
# of control protocols and simplify matches in vlan/eth_src, enabling use of
# exact_match.
# Canonical table ordering for the default pipeline.
FAUCET_PIPELINE = (
    PORT_ACL_DEFAULT_CONFIG,
    VLAN_DEFAULT_CONFIG,
    COPRO_DEFAULT_CONFIG,
    VLAN_ACL_DEFAULT_CONFIG,
    CLASSIFICATION_DEFAULT_CONFIG,
    ETH_SRC_DEFAULT_CONFIG,
    IPV4_FIB_DEFAULT_CONFIG,
    IPV6_FIB_DEFAULT_CONFIG,
    VIP_DEFAULT_CONFIG,
    ETH_DST_HAIRPIN_DEFAULT_CONFIG,
    ETH_DST_DEFAULT_CONFIG,
    EGRESS_ACL_DEFAULT_CONFIG,
    EGRESS_DEFAULT_CONFIG,
    FLOOD_DEFAULT_CONFIG,
)
# Lookup of default table config by table name.
DEFAULT_CONFIGS = {
    'port_acl': PORT_ACL_DEFAULT_CONFIG,
    'vlan': VLAN_DEFAULT_CONFIG,
    'copro': COPRO_DEFAULT_CONFIG,
    'vlan_acl': VLAN_ACL_DEFAULT_CONFIG,
    'eth_src': ETH_SRC_DEFAULT_CONFIG,
    'ipv4_fib': IPV4_FIB_DEFAULT_CONFIG,
    'ipv6_fib': IPV6_FIB_DEFAULT_CONFIG,
    'vip': VIP_DEFAULT_CONFIG,
    'eth_dst_hairpin': ETH_DST_HAIRPIN_DEFAULT_CONFIG,
    'eth_dst': ETH_DST_DEFAULT_CONFIG,
    'egress_acl': EGRESS_ACL_DEFAULT_CONFIG,
    'egress': EGRESS_DEFAULT_CONFIG,
    'flood': FLOOD_DEFAULT_CONFIG,
}
| 34.362791 | 93 | 0.678668 |
from faucet.faucet_metadata import EGRESS_METADATA_MASK
class ValveTableConfig:
    """Configuration for a single table."""
    def __init__(self, name, table_id,
                 exact_match=None, meter=None, output=True, miss_goto=None,
                 size=None, match_types=None, set_fields=None, dec_ttl=None,
                 vlan_scale=None, vlan_port_scale=None,
                 next_tables=None, metadata_match=0, metadata_write=0):
        self.name = name
        self.table_id = table_id
        self.exact_match = exact_match
        self.meter = meter
        self.output = output
        self.miss_goto = miss_goto
        self.size = size
        self.match_types = match_types
        self.set_fields = set_fields
        self.dec_ttl = dec_ttl
        self.vlan_scale = vlan_scale
        self.vlan_port_scale = vlan_port_scale
        self.metadata_match = metadata_match
        self.metadata_write = metadata_write
        # A supplied successor list must be an ordered sequence;
        # absent/empty successors are normalized to an empty tuple.
        if next_tables:
            assert isinstance(next_tables, (list, tuple))
            self.next_tables = next_tables
        else:
            self.next_tables = ()
    def __str__(self):
        """Return a stable string of all truthy attributes (also drives hashing)."""
        field_strs = ' '.join([
            '%s: %s' % (key, val)
            for key, val in sorted(self.__dict__.items())
            if val])
        return 'table config %s' % field_strs
    def __repr__(self):
        return self.__str__()
    def __hash__(self):
        # Hash, equality and ordering are all derived from the string form.
        return hash(self.__str__())
    def __eq__(self, other):
        return self.__hash__() == other.__hash__()
    def __lt__(self, other):
        return self.__hash__() < other.__hash__()
# Successor tables reachable after Ethernet destination processing.
_NEXT_ETH = ('eth_dst_hairpin', 'eth_dst', 'flood')
# As above, but virtual-IP (vip) processing may also follow.
_NEXT_VIP = ('vip',) + _NEXT_ETH
def _fib_table(ipv, table_id):
    """Return the ValveTableConfig for an IPv4 or IPv6 FIB table."""
    return ValveTableConfig(
        'ipv%u_fib' % ipv,
        table_id,
        match_types=(('eth_type', False), ('ipv%u_dst' % ipv, True), ('vlan_vid', False)),
        set_fields=('eth_dst', 'eth_src', 'vlan_vid'),
        dec_ttl=True,
        vlan_port_scale=3.1,
        next_tables=_NEXT_VIP
    )
# Port ACLs are applied first, in table 0.
PORT_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'port_acl',
    0,
    match_types=(('in_port', False),),
    next_tables=(('vlan',) + _NEXT_VIP)
)
VLAN_DEFAULT_CONFIG = ValveTableConfig(
    'vlan',
    PORT_ACL_DEFAULT_CONFIG.table_id + 1,
    match_types=(('eth_dst', True), ('eth_type', False),
                 ('in_port', False), ('vlan_vid', False)),
    set_fields=('vlan_vid',),
    vlan_port_scale=3,
    next_tables=('copro', 'vlan_acl', 'classification', 'eth_src')
)
# NOTE(review): copro and vlan_acl share the same table ID (vlan + 1);
# presumably only one of the two is instantiated in a given pipeline -- confirm.
COPRO_DEFAULT_CONFIG = ValveTableConfig(
    'copro',
    VLAN_DEFAULT_CONFIG.table_id + 1,
    match_types=(('in_port', False), ('eth_type', False), ('vlan_vid', False)),
    vlan_port_scale=1.5,
    miss_goto='eth_dst',
    next_tables=(('eth_dst',)),
)
VLAN_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'vlan_acl',
    VLAN_DEFAULT_CONFIG.table_id + 1,
    next_tables=(('classification', 'eth_src') + _NEXT_ETH))
CLASSIFICATION_DEFAULT_CONFIG = ValveTableConfig(
    'classification',
    VLAN_ACL_DEFAULT_CONFIG.table_id + 1,
    miss_goto='eth_src',
    next_tables=(('eth_src', 'ipv4_fib', 'ipv6_fib') + _NEXT_VIP)
)
ETH_SRC_DEFAULT_CONFIG = ValveTableConfig(
    'eth_src',
    CLASSIFICATION_DEFAULT_CONFIG.table_id + 1,
    miss_goto='eth_dst',
    next_tables=(('ipv4_fib', 'ipv6_fib') + _NEXT_VIP),
    match_types=(('eth_dst', True), ('eth_src', False), ('eth_type', False),
                 ('in_port', False), ('vlan_vid', False)),
    set_fields=('vlan_vid', 'eth_dst'),
    vlan_port_scale=4.1,
)
IPV4_FIB_DEFAULT_CONFIG = _fib_table(4, ETH_SRC_DEFAULT_CONFIG.table_id + 1)
IPV6_FIB_DEFAULT_CONFIG = _fib_table(6, IPV4_FIB_DEFAULT_CONFIG.table_id + 1)
VIP_DEFAULT_CONFIG = ValveTableConfig(
    'vip',
    IPV6_FIB_DEFAULT_CONFIG.table_id + 1,
    match_types=(('arp_tpa', False), ('eth_dst', False), ('eth_type', False),
                 ('icmpv6_type', False), ('ip_proto', False)),
    next_tables=_NEXT_ETH,
    vlan_scale=8,
)
ETH_DST_HAIRPIN_DEFAULT_CONFIG = ValveTableConfig(
    'eth_dst_hairpin',
    VIP_DEFAULT_CONFIG.table_id + 1,
    match_types=(('in_port', False), ('eth_dst', False), ('vlan_vid', False)),
    miss_goto='eth_dst',
    exact_match=True,
    vlan_port_scale=4.1,
)
ETH_DST_DEFAULT_CONFIG = ValveTableConfig(
    'eth_dst',
    ETH_DST_HAIRPIN_DEFAULT_CONFIG.table_id + 1,
    exact_match=True,
    miss_goto='flood',
    match_types=(('eth_dst', False), ('vlan_vid', False)),
    next_tables=('egress', 'egress_acl'),
    vlan_port_scale=4.1,
    metadata_write=EGRESS_METADATA_MASK
)
EGRESS_ACL_DEFAULT_CONFIG = ValveTableConfig(
    'egress_acl',
    ETH_DST_DEFAULT_CONFIG.table_id + 1,
    next_tables=('egress',)
)
EGRESS_DEFAULT_CONFIG = ValveTableConfig(
    'egress',
    EGRESS_ACL_DEFAULT_CONFIG.table_id + 1,
    match_types=(('metadata', True), ('vlan_vid', False)),
    vlan_port_scale=1.5,
    next_tables=('flood',),
    miss_goto='flood',
    metadata_match=EGRESS_METADATA_MASK
)
FLOOD_DEFAULT_CONFIG = ValveTableConfig(
    'flood',
    EGRESS_DEFAULT_CONFIG.table_id + 1,
    match_types=(('eth_dst', True), ('in_port', False), ('vlan_vid', False)),
    vlan_port_scale=8.0,
)
# Tables every FAUCET pipeline must provide.
MINIMUM_FAUCET_PIPELINE_TABLES = {
    'vlan', 'eth_src', 'eth_dst', 'flood'}
# Canonical table ordering for the default pipeline.
FAUCET_PIPELINE = (
    PORT_ACL_DEFAULT_CONFIG,
    VLAN_DEFAULT_CONFIG,
    COPRO_DEFAULT_CONFIG,
    VLAN_ACL_DEFAULT_CONFIG,
    CLASSIFICATION_DEFAULT_CONFIG,
    ETH_SRC_DEFAULT_CONFIG,
    IPV4_FIB_DEFAULT_CONFIG,
    IPV6_FIB_DEFAULT_CONFIG,
    VIP_DEFAULT_CONFIG,
    ETH_DST_HAIRPIN_DEFAULT_CONFIG,
    ETH_DST_DEFAULT_CONFIG,
    EGRESS_ACL_DEFAULT_CONFIG,
    EGRESS_DEFAULT_CONFIG,
    FLOOD_DEFAULT_CONFIG,
)
# Lookup of default table config by table name.
DEFAULT_CONFIGS = {
    'port_acl': PORT_ACL_DEFAULT_CONFIG,
    'vlan': VLAN_DEFAULT_CONFIG,
    'copro': COPRO_DEFAULT_CONFIG,
    'vlan_acl': VLAN_ACL_DEFAULT_CONFIG,
    'eth_src': ETH_SRC_DEFAULT_CONFIG,
    'ipv4_fib': IPV4_FIB_DEFAULT_CONFIG,
    'ipv6_fib': IPV6_FIB_DEFAULT_CONFIG,
    'vip': VIP_DEFAULT_CONFIG,
    'eth_dst_hairpin': ETH_DST_HAIRPIN_DEFAULT_CONFIG,
    'eth_dst': ETH_DST_DEFAULT_CONFIG,
    'egress_acl': EGRESS_ACL_DEFAULT_CONFIG,
    'egress': EGRESS_DEFAULT_CONFIG,
    'flood': FLOOD_DEFAULT_CONFIG,
}
| true | true |
f72fc60aecfcd841a19625037bc8f38fa6921303 | 30,717 | py | Python | models/tests/test_dataio.py | endymecy/NDIToolbox | f7a0a642b4a778d9d0c131871f4bfb9822ecb3da | [
"BSD-4-Clause"
] | 5 | 2017-02-28T16:16:06.000Z | 2020-07-13T06:49:34.000Z | models/tests/test_dataio.py | endymecy/NDIToolbox | f7a0a642b4a778d9d0c131871f4bfb9822ecb3da | [
"BSD-4-Clause"
] | 1 | 2018-08-19T19:08:14.000Z | 2018-08-19T19:08:14.000Z | models/tests/test_dataio.py | endymecy/NDIToolbox | f7a0a642b4a778d9d0c131871f4bfb9822ecb3da | [
"BSD-4-Clause"
] | 4 | 2017-10-25T20:17:15.000Z | 2021-07-26T11:39:50.000Z | """test_dataio.py - tests the dataio module
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
import unittest
from models import dataio
from controllers import pathfinder
from utils.skiptest import skipIfModuleNotInstalled
import h5py
import numpy as np
import numpy.testing
import scipy.misc
import os
import random
class TestDataIO(unittest.TestCase):
    """Tests Data IO functions.

    Fix: cleanup blocks previously caught WindowsError, which is undefined
    on non-Windows Python 2 (a failed cleanup raised NameError instead of
    being ignored).  OSError is caught instead: it is defined on every
    platform and WindowsError is a subclass/alias of it, so Windows
    behavior is unchanged.
    """

    def setUp(self):
        # Write a small random dataset to an HDF5 file used by the read tests.
        self.sample_data = np.array(self.random_data())
        self.sample_data_basename = "sample.dat"
        self.sample_data_file = os.path.join(os.path.dirname(__file__),
                                             self.sample_data_basename)
        with h5py.File(self.sample_data_file, 'w') as fidout:
            fidout.create_dataset(self.sample_data_basename, data=self.sample_data)

    def random_data(self):
        """Returns a list of random data"""
        return [random.uniform(-100, 100) for i in range(25)]

    def test_save_data(self):
        """Verify save_data function saves NumPy array to disk"""
        sample_filename = "test_savedata.dat"
        sample_path = os.path.join(os.path.dirname(__file__), sample_filename)
        dataio.save_data(sample_path, self.sample_data)
        self.assertTrue(os.path.exists(sample_path + ".hdf5"))
        with h5py.File(sample_path + ".hdf5", "r") as fidin:
            froot, ext = os.path.splitext(os.path.basename(sample_filename))
            for key in fidin.keys():
                if key.startswith(froot):
                    read_data = fidin[key][...]
                    self.assertTrue(np.array_equal(self.sample_data, read_data))
        if os.path.exists(sample_path + ".hdf5"):
            os.remove(sample_path + ".hdf5")

    def test_get_data(self):
        """Verify get_data function returns a NumPy array"""
        read_data = dataio.get_data(self.sample_data_file)
        self.assertTrue(np.array_equal(self.sample_data, read_data))

    def test_get_data_slice(self):
        """Verify get_data function returns a slice if specified"""
        slice_idx = np.s_[5:15]
        read_hyperslab = dataio.get_data(self.sample_data_file, slice_idx)
        self.assertTrue(np.array_equal(self.sample_data[slice_idx], read_hyperslab))

    def test_get_txt_data(self):
        """Verify retrieval of ASCII delimited data"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                        '1.25 from hole Single Column.asc')
        assert(os.path.exists(sample_data_file))
        import_params = {'delimiter': None}
        expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])
        retrieved_data = dataio.get_txt_data(sample_data_file, **import_params)
        self.assertTrue(np.array_equal(expected_data, retrieved_data))

    def test_import_txt(self):
        """Verify import of ASCII delimited data files"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                        '1.25 from hole Single Column.asc')
        assert(os.path.exists(sample_data_file))
        import_params = {'delimiter': None}
        expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])
        dataio.import_txt(sample_data_file, **import_params)
        dest_file = os.path.join(pathfinder.data_path(),
                                 os.path.basename(sample_data_file) + ".hdf5")
        self.assertTrue(os.path.exists(dest_file))
        with h5py.File(dest_file, "r") as fidin:
            root, ext = os.path.splitext(os.path.basename(dest_file))
            for key in fidin.keys():
                if key.startswith(root):
                    read_data = fidin[key][...]
                    self.assertTrue(np.array_equal(expected_data, read_data))
        try:
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use (WindowsError is an OSError alias on Windows)
            pass

    def test_export_txt(self):
        """Verify export of data to delimited ASCII"""
        # Use integer data to avoid the floating point conversion to/from files
        sample_data = self.sample_data.astype(np.int64)
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                        'sample.hdf5')
        dest_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                 'sample.txt')
        with h5py.File(sample_data_file, "w") as fidout:
            fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)
        export_params = {'delimiter': ','}
        dataio.export_txt(dest_file, sample_data_file, **export_params)
        retrieved_data = np.genfromtxt(dest_file, delimiter=export_params['delimiter'])
        self.assertTrue(np.array_equal(sample_data, retrieved_data))
        try:
            if os.path.exists(sample_data_file):
                os.remove(sample_data_file)
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use
            pass

    def test_export3D_txt(self):
        """Verify export of 3D data to delimited ASCII"""
        x_size = 5
        y_size = 4
        z_size = 6
        sample_data = np.empty((y_size, x_size, z_size))
        for xidx in range(x_size):
            for yidx in range(y_size):
                for zidx in range(z_size):
                    sample_data[yidx, xidx, zidx] = int(random.uniform(-100, 100))
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.hdf5')
        dest_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.txt')
        with h5py.File(sample_data_file, "w") as fidout:
            fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)
        export_params = {'delimiter': ','}
        dataio.export_txt(dest_file, sample_data_file, **export_params)
        # Re-read the x,y,z triples; z values cycle fastest in the export.
        retrieved_data = np.empty(sample_data.shape)
        with open(dest_file, "rb") as fidin:
            zidx = 0
            for line in fidin:
                if not line.startswith('#'):
                    x, y, z = line.split(export_params['delimiter'])
                    x = int(x)
                    y = int(y)
                    z = float(z.strip())
                    retrieved_data[y, x, zidx] = z
                    zidx += 1
                    if zidx > sample_data.shape[2]-1:
                        zidx = 0
        self.assertTrue(np.array_equal(sample_data, retrieved_data))
        try:
            if os.path.exists(sample_data_file):
                os.remove(sample_data_file)
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use
            pass

    @skipIfModuleNotInstalled("dicom")
    def test_get_dicom_data(self):
        """Verify retrieval of DICOM / DICONDE data"""
        import dicom
        diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')
        for root, dirs, files in os.walk(diconde_folder):
            for fname in files:
                dicom_data_file = os.path.join(root, fname)
                basename, ext = os.path.splitext(dicom_data_file)
                # Simple check to ensure we're looking at DICOM files
                if ext.lower() == '.dcm':
                    dicom_data = dicom.read_file(dicom_data_file)
                    dicom_arr = dicom_data.pixel_array
                    retrieved_data = dataio.get_dicom_data(dicom_data_file)
                    self.assertTrue(np.array_equal(dicom_arr, retrieved_data))

    @skipIfModuleNotInstalled("dicom")
    def test_import_dicom(self):
        """Verify import of DICOM / DICONDE data"""
        # Load the ASTM DICONDE example files,
        # save, then ensure the resulting arrays
        # are identical
        import dicom
        diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')
        for root, dirs, files in os.walk(diconde_folder):
            for fname in files:
                dicom_data_file = os.path.join(root, fname)
                basename, ext = os.path.splitext(dicom_data_file)
                # Simple check to ensure we're looking at DICOM files
                if ext.lower() == '.dcm':
                    dicom_data = dicom.read_file(dicom_data_file)
                    dicom_arr = dicom_data.pixel_array
                    dataio.import_dicom(dicom_data_file)
                    dest_file = os.path.join(pathfinder.data_path(),
                                             os.path.basename(dicom_data_file) + ".hdf5")
                    self.assertTrue(os.path.exists(dest_file))
                    with h5py.File(dest_file, "r") as fidin:
                        froot, ext = os.path.splitext(os.path.basename(dest_file))
                        for key in fidin.keys():
                            if key.startswith(froot):
                                read_data = fidin[key][...]
                                self.assertTrue(np.array_equal(dicom_arr, read_data))
                    try:
                        if os.path.exists(dest_file):
                            os.remove(dest_file)
                    except OSError:  # file in use
                        pass

    def test_get_img_data(self):
        """Verify retrieval of bitmap data"""
        # NOTE(review): scipy.misc.imread is deprecated/removed in modern SciPy;
        # consider imageio.imread when the dataio module migrates -- confirm.
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                        'austin_sky320x240.jpg')
        assert(os.path.exists(sample_data_file))
        expected_data = scipy.misc.imread(sample_data_file, flatten=True)
        retrieved_data = dataio.get_img_data(sample_data_file, flatten=True)
        self.assertTrue(np.array_equal(expected_data, retrieved_data))

    def test_import_img(self):
        """Verify import of images"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
                                        'austin_sky320x240.jpg')
        assert(os.path.exists(sample_data_file))
        expected_data = scipy.misc.imread(sample_data_file, flatten=True)
        dataio.import_img(sample_data_file, flatten=True)
        dest_file = os.path.join(pathfinder.data_path(),
                                 os.path.basename(sample_data_file) + ".hdf5")
        self.assertTrue(os.path.exists(dest_file))
        with h5py.File(dest_file, "r") as fidin:
            root, ext = os.path.splitext(os.path.basename(dest_file))
            for key in fidin.keys():
                if key.startswith(root):
                    read_data = fidin[key][...]
                    self.assertTrue(np.array_equal(expected_data, read_data))
        try:
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use
            pass

    def test_get_utwin_tof_data(self):
        """Verify retrieval of UTWin Time Of Flight data through convenience function"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
        # Stored TOF counts are scaled by the scan's TOF resolution.
        tof_resolution = 0.01
        assert(os.path.exists(tof_data_file))
        expected_tof_data = np.load(tof_data_file) * tof_resolution
        returned_tof_data = dataio.get_utwin_tof_data(sample_data_file)[0]
        numpy.testing.assert_array_almost_equal(expected_tof_data, returned_tof_data, decimal=3)

    def test_import_utwin_tof(self):
        """Verify import of UTWin Time Of Flight data through convenience function"""
        tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        tof_resolution = 0.01
        expected_tof_data = np.load(tof_data_file) * tof_resolution
        root, ext = os.path.splitext(os.path.basename(sample_data_file))
        dest_file = os.path.join(pathfinder.data_path(),
                                 os.path.basename(root) + "_tofdata0.csc.hdf5")
        dataio.import_utwin_tof(sample_data_file)
        self.assertTrue(os.path.exists(dest_file))
        with h5py.File(dest_file, "r") as fidin:
            root, ext = os.path.splitext(os.path.basename(dest_file))
            for key in fidin.keys():
                if key.startswith(root):
                    read_data = fidin[key][...]
                    numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)
        try:
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use
            pass

    def test_get_utwin_amp_data(self):
        """Verify retrieval of UTWin amplitude data through convenience function"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
        assert(os.path.exists(amp_data_file))
        expected_tof_data = np.load(amp_data_file)
        self.assertTrue(np.array_equal(expected_tof_data, dataio.get_utwin_amp_data(sample_data_file)[0]))

    def test_import_utwin_amp(self):
        """Verify import of UTWin amplitude data through convenience function"""
        amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        expected_amp_data = np.load(amp_data_file)
        root, ext = os.path.splitext(os.path.basename(sample_data_file))
        dest_file = os.path.join(pathfinder.data_path(),
                                 os.path.basename(root) + "_ampdata0.csc.hdf5")
        dataio.import_utwin_amp(sample_data_file)
        self.assertTrue(os.path.exists(dest_file))
        with h5py.File(dest_file, "r") as fidin:
            root, ext = os.path.splitext(os.path.basename(dest_file))
            for key in fidin.keys():
                if key.startswith(root):
                    read_data = fidin[key][...]
                    self.assertTrue(np.array_equal(expected_amp_data, read_data))
        try:
            if os.path.exists(dest_file):
                os.remove(dest_file)
        except OSError:  # file in use
            pass

    def test_get_utwin_data(self):
        """Verify returning UTWin data"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        sample_reader = dataio.UTWinCScanDataFile(sample_data_file)
        sample_reader.read_data()
        expected_data = sample_reader.data
        returned_data = dataio.get_utwin_data(sample_data_file)
        for datatype in expected_data:
            self.assertTrue(np.array_equal(expected_data[datatype], returned_data[datatype]))

    def test_get_winspect_data(self):
        """Verify retrieval of Winspect data through convenience function"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')
        assert(os.path.exists(sample_data_file))
        scan_reader = dataio.WinspectReader(sample_data_file)
        expected_data_list = scan_reader.get_winspect_data()
        retrieved_data_list = dataio.get_winspect_data(sample_data_file)
        self.assertEqual(len(expected_data_list), len(retrieved_data_list))
        for data_array_idx in range(len(expected_data_list)):
            self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))

    def test_import_winspect(self):
        """Verify import of Winspect data through convenience function"""
        sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')
        assert(os.path.exists(sample_data_file))
        output_basename, ext = os.path.splitext(sample_data_file)
        amp_dest_file = os.path.join(pathfinder.data_path(),
                                     os.path.basename(output_basename) + "_ampdata0" + ext + ".hdf5")
        waveform_dest_file = os.path.join(pathfinder.data_path(),
                                          os.path.basename(output_basename) + "_waveformdata0" + ext + ".hdf5")
        dataio.import_winspect(sample_data_file)
        expected_data_list = dataio.get_winspect_data(sample_data_file)
        # Each dataset is written to a file named after its data type.
        for dataset in expected_data_list:
            if "amplitude" in dataset.data_type:
                dest_file = amp_dest_file
            elif "waveform" in dataset.data_type:
                dest_file = waveform_dest_file
            with h5py.File(dest_file, "r") as fidin:
                root, ext = os.path.splitext(os.path.basename(dest_file))
                for key in fidin.keys():
                    if key.startswith(root):
                        read_data = fidin[key][...]
                        self.assertTrue(np.array_equal(dataset.data, read_data))
            try:
                if os.path.exists(dest_file):
                    os.remove(dest_file)
            except OSError:  # file in use
                pass

    def tearDown(self):
        # Remove the HDF5 file written by setUp plus any stray original.
        if os.path.exists(self.sample_data_file + ".hdf5"):
            os.remove(self.sample_data_file + ".hdf5")
        if os.path.exists(self.sample_data_file):
            os.remove(self.sample_data_file)
class TestUTWinCScanReader(unittest.TestCase):
    """Tests the UTWinCScanReader class"""
    def setUp(self):
        # All tests parse the same sample UTWin .csc file.
        self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
        assert(os.path.exists(self.sample_data_file))
        self.cscan_reader = dataio.UTWinCscanReader()
    def test_basicfile_parameters(self):
        """Verify the basic parameters of the CSC file format are correct"""
        self.assertEqual(self.cscan_reader.header_string_length, 15)
        # Known UTWin message IDs; any change here breaks file parsing.
        expected_message_ids = {'CSCAN_DATA': 2300,
                                'WAVEFORM_pre240': 2016,
                                'WAVEFORM_post240': 2303,
                                'UTSAVE_UTCD0': 2010,
                                'UTSAVE_UTCD1': 2011,
                                'UTSAVE_UTCD2': 2012,
                                'UTSAVE_UTCD4': 2014,
                                'UTSAVE_UTPro0': 253,
                                'PROJECT': 301,
                                'UTSAVE_UTHead': 100,
                                'UTSAVE_UTCScan0': 750,
                                'UTSAVE_UTCD10': 2020,
                                'UTSAVE_UTCScan3': 753}
        self.assertDictEqual(expected_message_ids, self.cscan_reader.message_ids)
    def test_is_cscanfile(self):
        """Verify reader correctly identifies CSC files"""
        self.assertTrue(self.cscan_reader.is_cscanfile(self.sample_data_file))
    def test_msg_info(self):
        """Verify reader correctly returns message ID and length"""
        with open(self.sample_data_file, "rb") as fidin:
            # Skip the fixed-length header string; the first message follows.
            fidin.seek(self.cscan_reader.header_string_length)
            first_message = (100, 14)
            self.assertTupleEqual(first_message, self.cscan_reader.msg_info(fidin))
    def test_find_message(self):
        """Verify find_message returns the expected file positions"""
        # (message ID, expected byte offset) pairs known for the sample file.
        expected_file_positions = ((2014, 38037),
                                   (2011, 38059),
                                   (2010, 38003),
                                   (2012, 422075),
                                   (2010, 38003),
                                   (2010, 38003))
        for message_id, expected_pos in expected_file_positions:
            self.assertEqual(self.cscan_reader.find_message(self.sample_data_file, message_id), expected_pos)
    def test_find_blocks(self):
        """Verify find_blocks returns the file positions for the specified message ID"""
        # Search for UTSave_UTAD0 (Message ID 950) - contains A/D settings for each channel
        expected_filed_positions = [173, 920, 1667, 2414, 3161, 3908, 4655, 5402]
        self.assertListEqual(expected_filed_positions, self.cscan_reader.find_blocks(self.sample_data_file, 950))
    def test_read_field(self):
        """Verify read_field correctly parses the specified message block"""
        start_pos = self.cscan_reader.find_message(self.sample_data_file, 950)
        self.assertTrue(start_pos != -1)
        # First pass: read the raw floats directly with np.fromfile ...
        with open(self.sample_data_file, "rb") as fidin:
            fidin.seek(start_pos)
            # Read a sample of A/D settings for the first channel
            expected_ad_delay = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_blanking_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_gain = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_offset = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_trigger_level = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
            expected_ad_trigger_rate = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
        # ... second pass: read the same fields through read_field and compare.
        with open(self.sample_data_file, "rb") as fidin:
            fidin.seek(start_pos)
            ad_delay = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_blanking_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_gain = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_offset = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_trigger_level = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
            ad_trigger_rate = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
        self.assertAlmostEqual(expected_ad_delay, ad_delay)
        self.assertAlmostEqual(expected_ad_width, ad_width)
        self.assertAlmostEqual(expected_ad_blanking_width, ad_blanking_width)
        self.assertAlmostEqual(expected_ad_gain, ad_gain)
        self.assertAlmostEqual(expected_ad_offset, ad_offset)
        self.assertAlmostEqual(expected_ad_trigger_level, ad_trigger_level)
        self.assertAlmostEqual(expected_ad_trigger_rate, ad_trigger_rate)
class TestUTWinCScanDataFile(unittest.TestCase):
"""Tests the UTWinCScanDataFile class.
Note: the sample UTWin data files available to TRI as of May 2013 are export-controlled and can't be
distributed, which in turn limits the tests that can be performed. The UTWinCScanDataFile class has been
tested against real inspection data, however without additional sample files you should consider the code
experimental. For more details, contact TRI.
"""
def setUp(self):
self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
self.cscan_datafile = dataio.UTWinCScanDataFile(self.sample_data_file)
def test_get_scan_version(self):
"""Verify get_scan_version returns the correct scan version"""
self.assertEqual(self.cscan_datafile.get_scan_version(), 117)
def test_read_scan_properties(self):
"""Verify read_scan_properties correctly compiles required scan settings"""
# Read a sample of the most important properties, verify read
important_scan_properties = {'n_height':320,
'n_width':600,
'rf_length':2994,
'channel_active':[1, 0, 0, 0, 0, 0, 0, 0]}
for idx in important_scan_properties.keys():
prop = important_scan_properties[idx]
if not isinstance(prop, list):
self.assertEqual(prop, self.cscan_datafile.scan_properties[idx])
else:
self.assertListEqual(prop, self.cscan_datafile.scan_properties[idx])
def test_read_tof_data(self):
"""Verify read_tof_data correctly reads Time Of Flight data"""
# Verify one TOF dataset
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
tof_resolution = 0.01
assert(os.path.exists(tof_data_file))
expected_tof_data = np.load(tof_data_file) * tof_resolution
self.cscan_datafile.read_tof_data()
numpy.testing.assert_array_almost_equal(expected_tof_data, self.cscan_datafile.data['tof'][0], decimal=3)
def test_read_amplitude_data(self):
"""Verify read_amplitude_data correctly reads amplitude data"""
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
assert(os.path.exists(amp_data_file))
expected_amp_data = np.load(amp_data_file)
self.cscan_datafile.read_amplitude_data()
self.assertTrue(np.array_equal(expected_amp_data, self.cscan_datafile.data['amplitude'][0]))
def test_import_tof(self):
"""Verify import of Time Of Flight data"""
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
tof_resolution = 0.01
csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')
assert(os.path.exists(tof_data_file))
expected_tof_data = np.load(tof_data_file) * tof_resolution
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(csc_data_file) + "_tofdata0.csc.hdf5")
self.cscan_datafile.import_tof_data()
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError: # file in use
pass
def test_import_amp(self):
"""Verify import of amplitude data"""
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')
assert(os.path.exists(amp_data_file))
expected_amp_data = np.load(amp_data_file)
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(csc_data_file) + "_ampdata0.csc.hdf5")
self.cscan_datafile.import_amplitude_data()
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(expected_amp_data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError: # file in use
pass
class TestWinspectReader(unittest.TestCase):
"""Tests the WinspectReader class."""
def setUp(self):
self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'sample_data.sdt')
assert(os.path.exists(self.sample_data_file))
self.scan_reader = dataio.WinspectReader(self.sample_data_file)
def test_find_numbers(self):
"""Verify find_numbers static method correctly pulls numbers from strings"""
float_strings = {"0.000000 mm":0.0, "0.775995 Usec":0.775995}
int_strings = {"35 18 0 22 3 112 ":[35, 18, 0, 22, 3, 112],
"Number of Sample Points : 3500":3500}
bad_strings = {"Ramshackle":[], "":[]}
for string in float_strings:
self.assertAlmostEqual(float_strings[string], self.scan_reader.find_numbers(string))
def test_get_winspect_data(self):
"""Verify returning the list of arrays read from the data file"""
data_reader = dataio.WinspectDataFile(self.sample_data_file)
data_reader.read_data()
expected_data_list = data_reader.datasets
retrieved_data_list = self.scan_reader.get_winspect_data()
self.assertEqual(len(expected_data_list), len(retrieved_data_list))
for data_array_idx in range(len(expected_data_list)):
self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))
def test_import_winspect(self):
"""Verify importing datasets"""
output_basename, ext = os.path.splitext(self.sample_data_file)
amp_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_ampdata0" + ext + ".hdf5")
waveform_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_waveformdata0" + ext + ".hdf5")
self.scan_reader.import_winspect()
data_reader = dataio.WinspectDataFile(self.sample_data_file)
data_reader.read_data()
expected_data_list = data_reader.datasets
for dataset in expected_data_list:
if "amplitude" in dataset.data_type:
dest_file = amp_dest_file
elif "waveform" in dataset.data_type:
dest_file = waveform_dest_file
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(dataset.data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError: # file in use
pass
if __name__ == "__main__":
random.seed()
unittest.main() | 51.799325 | 126 | 0.624442 |
__author__ = 'Chris R. Coughlin'
import unittest
from models import dataio
from controllers import pathfinder
from utils.skiptest import skipIfModuleNotInstalled
import h5py
import numpy as np
import numpy.testing
import scipy.misc
import os
import random
class TestDataIO(unittest.TestCase):
def setUp(self):
self.sample_data = np.array(self.random_data())
self.sample_data_basename = "sample.dat"
self.sample_data_file = os.path.join(os.path.dirname(__file__),
self.sample_data_basename)
with h5py.File(self.sample_data_file, 'w') as fidout:
fidout.create_dataset(self.sample_data_basename, data=self.sample_data)
def random_data(self):
return [random.uniform(-100, 100) for i in range(25)]
def test_save_data(self):
sample_filename = "test_savedata.dat"
sample_path = os.path.join(os.path.dirname(__file__), sample_filename)
dataio.save_data(sample_path, self.sample_data)
self.assertTrue(os.path.exists(sample_path + ".hdf5"))
with h5py.File(sample_path + ".hdf5", "r") as fidin:
froot, ext = os.path.splitext(os.path.basename(sample_filename))
for key in fidin.keys():
if key.startswith(froot):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(self.sample_data, read_data))
if os.path.exists(sample_path + ".hdf5"):
os.remove(sample_path + ".hdf5")
def test_get_data(self):
read_data = dataio.get_data(self.sample_data_file)
self.assertTrue(np.array_equal(self.sample_data, read_data))
def test_get_data_slice(self):
slice_idx = np.s_[5:15]
read_hyperslab = dataio.get_data(self.sample_data_file, slice_idx)
self.assertTrue(np.array_equal(self.sample_data[slice_idx], read_hyperslab))
def test_get_txt_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'1.25 from hole Single Column.asc')
assert(os.path.exists(sample_data_file))
import_params = {'delimiter': None}
expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])
retrieved_data = dataio.get_txt_data(sample_data_file, **import_params)
self.assertTrue(np.array_equal(expected_data, retrieved_data))
def test_import_txt(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'1.25 from hole Single Column.asc')
assert(os.path.exists(sample_data_file))
import_params = {'delimiter': None}
expected_data = np.loadtxt(sample_data_file, delimiter=import_params['delimiter'])
dataio.import_txt(sample_data_file, **import_params)
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(sample_data_file) + ".hdf5")
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(expected_data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_export_txt(self):
sample_data = self.sample_data.astype(np.int64)
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'sample.hdf5')
dest_file = os.path.join(os.path.dirname(__file__), 'support_files',
'sample.txt')
with h5py.File(sample_data_file, "w") as fidout:
fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)
export_params = {'delimiter': ','}
dataio.export_txt(dest_file, sample_data_file, **export_params)
retrieved_data = np.genfromtxt(dest_file, delimiter=export_params['delimiter'])
self.assertTrue(np.array_equal(sample_data, retrieved_data))
try:
if os.path.exists(sample_data_file):
os.remove(sample_data_file)
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_export3D_txt(self):
x_size = 5
y_size = 4
z_size = 6
sample_data = np.empty((y_size, x_size, z_size))
for xidx in range(x_size):
for yidx in range(y_size):
for zidx in range(z_size):
sample_data[yidx, xidx, zidx] = int(random.uniform(-100, 100))
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.hdf5')
dest_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample3d.txt')
with h5py.File(sample_data_file, "w") as fidout:
fidout.create_dataset(os.path.basename(sample_data_file), data=sample_data)
export_params = {'delimiter': ','}
dataio.export_txt(dest_file, sample_data_file, **export_params)
retrieved_data = np.empty(sample_data.shape)
with open(dest_file, "rb") as fidin:
zidx = 0
for line in fidin:
if not line.startswith('#'):
x, y, z = line.split(export_params['delimiter'])
x = int(x)
y = int(y)
z = float(z.strip())
retrieved_data[y, x, zidx] = z
zidx += 1
if zidx > sample_data.shape[2]-1:
zidx = 0
self.assertTrue(np.array_equal(sample_data, retrieved_data))
try:
if os.path.exists(sample_data_file):
os.remove(sample_data_file)
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
@skipIfModuleNotInstalled("dicom")
def test_get_dicom_data(self):
import dicom
diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')
for root, dirs, files in os.walk(diconde_folder):
for fname in files:
dicom_data_file = os.path.join(root, fname)
basename, ext = os.path.splitext(dicom_data_file)
if ext.lower() == '.dcm':
dicom_data = dicom.read_file(dicom_data_file)
dicom_arr = dicom_data.pixel_array
retrieved_data = dataio.get_dicom_data(dicom_data_file)
self.assertTrue(np.array_equal(dicom_arr, retrieved_data))
@skipIfModuleNotInstalled("dicom")
def test_import_dicom(self):
# Load the ASTM DICONDE example files,
# save, then ensure the resulting arrays
# are identical
import dicom
diconde_folder = os.path.join(os.path.dirname(__file__), 'support_files')
for root, dirs, files in os.walk(diconde_folder):
for fname in files:
dicom_data_file = os.path.join(root, fname)
basename, ext = os.path.splitext(dicom_data_file)
# Simple check to ensure we're looking at DICOM files
if ext.lower() == '.dcm':
dicom_data = dicom.read_file(dicom_data_file)
dicom_arr = dicom_data.pixel_array
dataio.import_dicom(dicom_data_file)
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(dicom_data_file) + ".hdf5")
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
froot, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(froot):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(dicom_arr, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_get_img_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'austin_sky320x240.jpg')
assert(os.path.exists(sample_data_file))
expected_data = scipy.misc.imread(sample_data_file, flatten=True)
retrieved_data = dataio.get_img_data(sample_data_file, flatten=True)
self.assertTrue(np.array_equal(expected_data, retrieved_data))
def test_import_img(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'austin_sky320x240.jpg')
assert(os.path.exists(sample_data_file))
expected_data = scipy.misc.imread(sample_data_file, flatten=True)
dataio.import_img(sample_data_file, flatten=True)
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(sample_data_file) + ".hdf5")
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(expected_data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_get_utwin_tof_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
tof_resolution = 0.01
assert(os.path.exists(tof_data_file))
expected_tof_data = np.load(tof_data_file) * tof_resolution
returned_tof_data = dataio.get_utwin_tof_data(sample_data_file)[0]
numpy.testing.assert_array_almost_equal(expected_tof_data, returned_tof_data, decimal=3)
def test_import_utwin_tof(self):
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
tof_resolution = 0.01
expected_tof_data = np.load(tof_data_file) * tof_resolution
root, ext = os.path.splitext(os.path.basename(sample_data_file))
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(root) + "_tofdata0.csc.hdf5")
dataio.import_utwin_tof(sample_data_file)
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_get_utwin_amp_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
assert(os.path.exists(amp_data_file))
expected_tof_data = np.load(amp_data_file)
self.assertTrue(np.array_equal(expected_tof_data, dataio.get_utwin_amp_data(sample_data_file)[0]))
def test_import_utwin_amp(self):
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
expected_amp_data = np.load(amp_data_file)
root, ext = os.path.splitext(os.path.basename(sample_data_file))
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(root) + "_ampdata0.csc.hdf5")
dataio.import_utwin_amp(sample_data_file)
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(expected_amp_data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_get_utwin_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
sample_reader = dataio.UTWinCScanDataFile(sample_data_file)
sample_reader.read_data()
expected_data = sample_reader.data
returned_data = dataio.get_utwin_data(sample_data_file)
for datatype in expected_data:
self.assertTrue(np.array_equal(expected_data[datatype], returned_data[datatype]))
def test_get_winspect_data(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')
assert(os.path.exists(sample_data_file))
scan_reader = dataio.WinspectReader(sample_data_file)
expected_data_list = scan_reader.get_winspect_data()
retrieved_data_list = dataio.get_winspect_data(sample_data_file)
self.assertEqual(len(expected_data_list), len(retrieved_data_list))
for data_array_idx in range(len(expected_data_list)):
self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))
def test_import_winspect(self):
sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'sample_data.sdt')
assert(os.path.exists(sample_data_file))
output_basename, ext = os.path.splitext(sample_data_file)
amp_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_ampdata0" + ext + ".hdf5")
waveform_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_waveformdata0" + ext + ".hdf5")
dataio.import_winspect(sample_data_file)
expected_data_list = dataio.get_winspect_data(sample_data_file)
for dataset in expected_data_list:
if "amplitude" in dataset.data_type:
dest_file = amp_dest_file
elif "waveform" in dataset.data_type:
dest_file = waveform_dest_file
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(dataset.data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def tearDown(self):
if os.path.exists(self.sample_data_file + ".hdf5"):
os.remove(self.sample_data_file + ".hdf5")
if os.path.exists(self.sample_data_file):
os.remove(self.sample_data_file)
class TestUTWinCScanReader(unittest.TestCase):
def setUp(self):
self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
assert(os.path.exists(self.sample_data_file))
self.cscan_reader = dataio.UTWinCscanReader()
def test_basicfile_parameters(self):
self.assertEqual(self.cscan_reader.header_string_length, 15)
expected_message_ids = {'CSCAN_DATA': 2300,
'WAVEFORM_pre240': 2016,
'WAVEFORM_post240': 2303,
'UTSAVE_UTCD0': 2010,
'UTSAVE_UTCD1': 2011,
'UTSAVE_UTCD2': 2012,
'UTSAVE_UTCD4': 2014,
'UTSAVE_UTPro0': 253,
'PROJECT': 301,
'UTSAVE_UTHead': 100,
'UTSAVE_UTCScan0': 750,
'UTSAVE_UTCD10': 2020,
'UTSAVE_UTCScan3': 753}
self.assertDictEqual(expected_message_ids, self.cscan_reader.message_ids)
def test_is_cscanfile(self):
self.assertTrue(self.cscan_reader.is_cscanfile(self.sample_data_file))
def test_msg_info(self):
with open(self.sample_data_file, "rb") as fidin:
fidin.seek(self.cscan_reader.header_string_length)
first_message = (100, 14)
self.assertTupleEqual(first_message, self.cscan_reader.msg_info(fidin))
def test_find_message(self):
expected_file_positions = ((2014, 38037),
(2011, 38059),
(2010, 38003),
(2012, 422075),
(2010, 38003),
(2010, 38003))
for message_id, expected_pos in expected_file_positions:
self.assertEqual(self.cscan_reader.find_message(self.sample_data_file, message_id), expected_pos)
def test_find_blocks(self):
expected_filed_positions = [173, 920, 1667, 2414, 3161, 3908, 4655, 5402]
self.assertListEqual(expected_filed_positions, self.cscan_reader.find_blocks(self.sample_data_file, 950))
def test_read_field(self):
start_pos = self.cscan_reader.find_message(self.sample_data_file, 950)
self.assertTrue(start_pos != -1)
with open(self.sample_data_file, "rb") as fidin:
fidin.seek(start_pos)
expected_ad_delay = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_blanking_width = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_gain = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_offset = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_trigger_level = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
expected_ad_trigger_rate = np.fromfile(fidin, self.cscan_reader.field_sizes['float'], 1)[0]
with open(self.sample_data_file, "rb") as fidin:
fidin.seek(start_pos)
ad_delay = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_blanking_width = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_gain = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_offset = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_trigger_level = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
ad_trigger_rate = self.cscan_reader.read_field(fidin, self.cscan_reader.field_sizes['float'])
self.assertAlmostEqual(expected_ad_delay, ad_delay)
self.assertAlmostEqual(expected_ad_width, ad_width)
self.assertAlmostEqual(expected_ad_blanking_width, ad_blanking_width)
self.assertAlmostEqual(expected_ad_gain, ad_gain)
self.assertAlmostEqual(expected_ad_offset, ad_offset)
self.assertAlmostEqual(expected_ad_trigger_level, ad_trigger_level)
self.assertAlmostEqual(expected_ad_trigger_rate, ad_trigger_rate)
class TestUTWinCScanDataFile(unittest.TestCase):
def setUp(self):
self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData.csc')
self.cscan_datafile = dataio.UTWinCScanDataFile(self.sample_data_file)
def test_get_scan_version(self):
self.assertEqual(self.cscan_datafile.get_scan_version(), 117)
def test_read_scan_properties(self):
important_scan_properties = {'n_height':320,
'n_width':600,
'rf_length':2994,
'channel_active':[1, 0, 0, 0, 0, 0, 0, 0]}
for idx in important_scan_properties.keys():
prop = important_scan_properties[idx]
if not isinstance(prop, list):
self.assertEqual(prop, self.cscan_datafile.scan_properties[idx])
else:
self.assertListEqual(prop, self.cscan_datafile.scan_properties[idx])
def test_read_tof_data(self):
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
tof_resolution = 0.01
assert(os.path.exists(tof_data_file))
expected_tof_data = np.load(tof_data_file) * tof_resolution
self.cscan_datafile.read_tof_data()
numpy.testing.assert_array_almost_equal(expected_tof_data, self.cscan_datafile.data['tof'][0], decimal=3)
def test_read_amplitude_data(self):
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
assert(os.path.exists(amp_data_file))
expected_amp_data = np.load(amp_data_file)
self.cscan_datafile.read_amplitude_data()
self.assertTrue(np.array_equal(expected_amp_data, self.cscan_datafile.data['amplitude'][0]))
def test_import_tof(self):
tof_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_tofdata.npy')
tof_resolution = 0.01
csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')
assert(os.path.exists(tof_data_file))
expected_tof_data = np.load(tof_data_file) * tof_resolution
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(csc_data_file) + "_tofdata0.csc.hdf5")
self.cscan_datafile.import_tof_data()
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
numpy.testing.assert_array_almost_equal(expected_tof_data, read_data, decimal=3)
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
def test_import_amp(self):
amp_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData_ampdata.npy')
csc_data_file = os.path.join(os.path.dirname(__file__), 'support_files', 'CScanData')
assert(os.path.exists(amp_data_file))
expected_amp_data = np.load(amp_data_file)
dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(csc_data_file) + "_ampdata0.csc.hdf5")
self.cscan_datafile.import_amplitude_data()
self.assertTrue(os.path.exists(dest_file))
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(expected_amp_data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
class TestWinspectReader(unittest.TestCase):
def setUp(self):
self.sample_data_file = os.path.join(os.path.dirname(__file__), 'support_files',
'sample_data.sdt')
assert(os.path.exists(self.sample_data_file))
self.scan_reader = dataio.WinspectReader(self.sample_data_file)
def test_find_numbers(self):
float_strings = {"0.000000 mm":0.0, "0.775995 Usec":0.775995}
int_strings = {"35 18 0 22 3 112 ":[35, 18, 0, 22, 3, 112],
"Number of Sample Points : 3500":3500}
bad_strings = {"Ramshackle":[], "":[]}
for string in float_strings:
self.assertAlmostEqual(float_strings[string], self.scan_reader.find_numbers(string))
def test_get_winspect_data(self):
data_reader = dataio.WinspectDataFile(self.sample_data_file)
data_reader.read_data()
expected_data_list = data_reader.datasets
retrieved_data_list = self.scan_reader.get_winspect_data()
self.assertEqual(len(expected_data_list), len(retrieved_data_list))
for data_array_idx in range(len(expected_data_list)):
self.assertTrue(np.array_equal(expected_data_list[data_array_idx].data, retrieved_data_list[data_array_idx].data))
def test_import_winspect(self):
output_basename, ext = os.path.splitext(self.sample_data_file)
amp_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_ampdata0" + ext + ".hdf5")
waveform_dest_file = os.path.join(pathfinder.data_path(),
os.path.basename(output_basename) + "_waveformdata0" + ext + ".hdf5")
self.scan_reader.import_winspect()
data_reader = dataio.WinspectDataFile(self.sample_data_file)
data_reader.read_data()
expected_data_list = data_reader.datasets
for dataset in expected_data_list:
if "amplitude" in dataset.data_type:
dest_file = amp_dest_file
elif "waveform" in dataset.data_type:
dest_file = waveform_dest_file
with h5py.File(dest_file, "r") as fidin:
root, ext = os.path.splitext(os.path.basename(dest_file))
for key in fidin.keys():
if key.startswith(root):
read_data = fidin[key][...]
self.assertTrue(np.array_equal(dataset.data, read_data))
try:
if os.path.exists(dest_file):
os.remove(dest_file)
except WindowsError:
pass
if __name__ == "__main__":
random.seed()
unittest.main() | true | true |
f72fca48a62d8d293aa54c0897823b567e43a32a | 2,008 | py | Python | tests/test_RunningStats.py | gratuxri/play-chess-with-a-webcam | 9ef7ec306a2a612871fba83130ebee1f044ef0c1 | [
"Apache-2.0"
] | 17 | 2019-10-25T01:33:43.000Z | 2022-03-21T03:31:56.000Z | tests/test_RunningStats.py | gratuxri/play-chess-with-a-webcam | 9ef7ec306a2a612871fba83130ebee1f044ef0c1 | [
"Apache-2.0"
] | 34 | 2019-10-17T06:52:30.000Z | 2022-01-19T12:45:43.000Z | tests/test_RunningStats.py | gratuxri/play-chess-with-a-webcam | 9ef7ec306a2a612871fba83130ebee1f044ef0c1 | [
"Apache-2.0"
] | 4 | 2019-11-29T09:19:38.000Z | 2021-10-13T03:12:25.000Z | #!/usr/bin/python3
# part of https://github.com/WolfgangFahl/play-chess-with-a-webcam
from pcwawc.runningstats import RunningStats, ColorStats, MovingAverage
import pytest
from unittest import TestCase
class RunningStatsTest(TestCase):
def test_RunningStats(self):
rs = RunningStats()
rs.push(17.0);
rs.push(19.0);
rs.push(24.0);
mean = rs.mean();
variance = rs.variance();
stdev = rs.standard_deviation();
print ("mean=%f variance=%f stdev=%f" % (mean, variance, stdev))
assert mean == 20.0
assert variance == 13.0
assert stdev == pytest.approx(3.605551, 0.00001)
def test_ColorStats(self):
colors = [(100, 100, 100), (90, 100, 90), (80, 90, 80), (110, 110, 120)]
colorStats = ColorStats()
for color in colors:
r, g, b = color
colorStats.push(r, g, b)
cm = colorStats.mean();
mR, mG, mB = cm
vR, vG, vB = colorStats.variance();
sR, sG, sB = colorStats.standard_deviation();
print ("mean=%f,%f,%f variance=%f,%f,%f stdev=%f,%f,%f" % (mR, mG, mB, vR, vG, vB, sR, sG, sB))
assert cm == (95.0, 100.0, 97.5)
prec = 0.000001
assert vR == pytest.approx(166.666667, prec)
assert vG == pytest.approx(66.666667, prec)
assert vB == pytest.approx(291.666667, prec)
assert sR == pytest.approx(12.909944, prec)
assert sG == pytest.approx(8.164966, prec)
assert sB == pytest.approx(17.078251, prec)
def test_MovingAverage(self):
values=[10,12,17,19,24,17,13,12,8]
means=[10,11,13,16,20,20,18,14,11]
gs=[0,2,3.5,3.5,3.5,-1,-5.5,-2.5,-2.5]
ma=MovingAverage(3)
index=0
for value in values:
ma.push(value)
print ("%d: %f %f %f" % (index,value,ma.mean(),ma.gradient()))
assert means[index]==ma.mean()
assert gs[index]==ma.gradient()
index+=1 | 36.509091 | 103 | 0.556275 |
from pcwawc.runningstats import RunningStats, ColorStats, MovingAverage
import pytest
from unittest import TestCase
class RunningStatsTest(TestCase):
def test_RunningStats(self):
rs = RunningStats()
rs.push(17.0);
rs.push(19.0);
rs.push(24.0);
mean = rs.mean();
variance = rs.variance();
stdev = rs.standard_deviation();
print ("mean=%f variance=%f stdev=%f" % (mean, variance, stdev))
assert mean == 20.0
assert variance == 13.0
assert stdev == pytest.approx(3.605551, 0.00001)
def test_ColorStats(self):
colors = [(100, 100, 100), (90, 100, 90), (80, 90, 80), (110, 110, 120)]
colorStats = ColorStats()
for color in colors:
r, g, b = color
colorStats.push(r, g, b)
cm = colorStats.mean();
mR, mG, mB = cm
vR, vG, vB = colorStats.variance();
sR, sG, sB = colorStats.standard_deviation();
print ("mean=%f,%f,%f variance=%f,%f,%f stdev=%f,%f,%f" % (mR, mG, mB, vR, vG, vB, sR, sG, sB))
assert cm == (95.0, 100.0, 97.5)
prec = 0.000001
assert vR == pytest.approx(166.666667, prec)
assert vG == pytest.approx(66.666667, prec)
assert vB == pytest.approx(291.666667, prec)
assert sR == pytest.approx(12.909944, prec)
assert sG == pytest.approx(8.164966, prec)
assert sB == pytest.approx(17.078251, prec)
def test_MovingAverage(self):
values=[10,12,17,19,24,17,13,12,8]
means=[10,11,13,16,20,20,18,14,11]
gs=[0,2,3.5,3.5,3.5,-1,-5.5,-2.5,-2.5]
ma=MovingAverage(3)
index=0
for value in values:
ma.push(value)
print ("%d: %f %f %f" % (index,value,ma.mean(),ma.gradient()))
assert means[index]==ma.mean()
assert gs[index]==ma.gradient()
index+=1 | true | true |
f72fcbddac8b795d0d38b329417686484d875719 | 2,006 | py | Python | jabberbot/_tests/test_capat.py | RealTimeWeb/wikisite | 66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5 | [
"Apache-2.0"
] | null | null | null | jabberbot/_tests/test_capat.py | RealTimeWeb/wikisite | 66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5 | [
"Apache-2.0"
] | null | null | null | jabberbot/_tests/test_capat.py | RealTimeWeb/wikisite | 66a22c68c172f0ebb3c88a9885ccd33e2d59c3c5 | [
"Apache-2.0"
] | 1 | 2020-01-09T04:53:32.000Z | 2020-01-09T04:53:32.000Z | # -*- coding: utf-8 -*-
import py
try:
from jabberbot import capat
except ImportError:
py.test.skip("Skipping jabber bot tests - pyxmpp is not installed")
def test_ver_simple():
# example values supplied by the XEP
ident = (("client", "pc"), )
feat = ("http://jabber.org/protocol/disco#info",
"http://jabber.org/protocol/disco#items",
"http://jabber.org/protocol/muc",
)
assert capat.generate_ver(ident, feat) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
def test_ver_complex():
# this test should verify that ordering works properly
ident = (("client", "animal"),
("client", "bear"), # type ordering after category ordering
("apples", "bar"),
("apple", "foo"), # "apples" starts with "apple"
# thus it's greater
)
feat = ()
expected = capat.hash_new('sha1')
expected.update("apple/foo<apples/bar<client/animal<client/bear<")
expected = capat.base64.b64encode(expected.digest())
assert capat.generate_ver(ident, feat) == expected
def test_xml():
    """Build the XEP-0115 disco#info example as a pyxmpp Iq stanza and check
    that hash_iq() reproduces the published verification string."""
    try:
        import pyxmpp.iq
    except ImportError:
        py.test.skip("pyxmpp needs to be installed for this test")

    # disco#info result stanza mirroring the XEP-0115 example.
    x = pyxmpp.iq.Iq(stanza_type='result', stanza_id='disco1',
                     from_jid='romeo@montague.lit/orchard',
                     to_jid='juliet@capulet.lit/chamber')
    y = x.new_query(ns_uri='http://jabber.org/protocol/disco#info')
    # One identity: category 'client', type 'pc'.
    z = y.newChild(None, 'identity', None)
    z.setProp('category', 'client')
    z.setProp('type', 'pc')
    # The three advertised features.
    y.newChild(None, 'feature', None).setProp(
        'var', 'http://jabber.org/protocol/disco#info')
    y.newChild(None, 'feature', None).setProp(
        'var', 'http://jabber.org/protocol/disco#items')
    y.newChild(None, 'feature', None).setProp(
        'var', 'http://jabber.org/protocol/muc')

    assert capat.hash_iq(x) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
    # hash value taken from `test_ver_simple`
| 34.586207 | 76 | 0.612164 |
import py
try:
from jabberbot import capat
except ImportError:
py.test.skip("Skipping jabber bot tests - pyxmpp is not installed")
def test_ver_simple():
ident = (("client", "pc"), )
feat = ("http://jabber.org/protocol/disco#info",
"http://jabber.org/protocol/disco#items",
"http://jabber.org/protocol/muc",
)
assert capat.generate_ver(ident, feat) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
def test_ver_complex():
ident = (("client", "animal"),
("client", "bear"),
("apples", "bar"),
("apple", "foo"),
)
feat = ()
expected = capat.hash_new('sha1')
expected.update("apple/foo<apples/bar<client/animal<client/bear<")
expected = capat.base64.b64encode(expected.digest())
assert capat.generate_ver(ident, feat) == expected
def test_xml():
try:
import pyxmpp.iq
except ImportError:
py.test.skip("pyxmpp needs to be installed for this test")
x = pyxmpp.iq.Iq(stanza_type='result', stanza_id='disco1',
from_jid='romeo@montague.lit/orchard',
to_jid='juliet@capulet.lit/chamber')
y = x.new_query(ns_uri='http://jabber.org/protocol/disco
z = y.newChild(None, 'identity', None)
z.setProp('category', 'client')
z.setProp('type', 'pc')
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/disco
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/disco
y.newChild(None, 'feature', None).setProp(
'var', 'http://jabber.org/protocol/muc')
assert capat.hash_iq(x) == "8RovUdtOmiAjzj+xI7SK5BCw3A8="
# hash value taken from `test_ver_simple`
| true | true |
f72fcdb0009d66ce95554b101bd82a076188d8f3 | 1,140 | py | Python | where_to_go/places/models.py | MZen2610/Yandex-poster | 07b1e44974783563c394b22625aa2543d74552f9 | [
"MIT"
] | null | null | null | where_to_go/places/models.py | MZen2610/Yandex-poster | 07b1e44974783563c394b22625aa2543d74552f9 | [
"MIT"
] | null | null | null | where_to_go/places/models.py | MZen2610/Yandex-poster | 07b1e44974783563c394b22625aa2543d74552f9 | [
"MIT"
] | null | null | null | from django.db import models
class Place(models.Model):
    """A point of interest shown on the map.

    Verbose names are in Russian because the admin UI is localised.
    """

    title = models.CharField(max_length=150, verbose_name='Наименование')
    description_short = models.TextField(blank=True, verbose_name='Краткое описание')
    description_long = models.TextField(blank=True, verbose_name='Полное описание')
    # Geographic coordinates in degrees (presumably WGS84 — confirm with the
    # data source); 14 decimal places preserves the imported precision.
    lng = models.DecimalField(max_digits=17, decimal_places=14, verbose_name='Долгота')
    lat = models.DecimalField(max_digits=17, decimal_places=14, verbose_name='Широта')

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Место'
        verbose_name_plural = 'Места'
        ordering = ['title']
class Images(models.Model):
    """An image attached to a Place; `num` orders images within a place."""

    # NOTE(review): despite the name, `title` is the related Place (FK), not
    # a caption.  SET_NULL keeps the image row when its place is deleted.
    title = models.ForeignKey('Place', on_delete=models.SET_NULL, null=True, verbose_name='Место', blank=False)
    # Display position; default ordering below is descending by position.
    num = models.IntegerField(verbose_name='Позиция')
    # Uploads are sharded into per-day directories.
    image = models.ImageField(upload_to='photos/%Y/%m/%d', blank=True, verbose_name='Изображение', null=True)

    def __str__(self):
        return f"{self.num} {self.title}"

    class Meta:
        verbose_name = 'Изображение'
        verbose_name_plural = 'Изображения'
        ordering = ['-num']
| 35.625 | 111 | 0.7 | from django.db import models
class Place(models.Model):
title = models.CharField(max_length=150, verbose_name='Наименование')
description_short = models.TextField(blank=True, verbose_name='Краткое описание')
description_long = models.TextField(blank=True, verbose_name='Полное описание')
lng = models.DecimalField(max_digits=17, decimal_places=14, verbose_name='Долгота')
lat = models.DecimalField(max_digits=17, decimal_places=14, verbose_name='Широта')
def __str__(self):
return self.title
class Meta:
verbose_name = 'Место'
verbose_name_plural = 'Места'
ordering = ['title']
class Images(models.Model):
title = models.ForeignKey('Place', on_delete=models.SET_NULL, null=True, verbose_name='Место', blank=False)
num = models.IntegerField(verbose_name='Позиция')
image = models.ImageField(upload_to='photos/%Y/%m/%d', blank=True, verbose_name='Изображение', null=True)
def __str__(self):
return f"{self.num} {self.title}"
class Meta:
verbose_name = 'Изображение'
verbose_name_plural = 'Изображения'
ordering = ['-num']
| true | true |
f72fcde236c44645e86f524db09b26ce3bfb931b | 3,649 | py | Python | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | null | null | null | server/core.py | cwza/deep_t2i | 22877fdd28ad407984ddc3bc4d57109c54c22fc0 | [
"Apache-2.0"
] | 1 | 2020-11-30T06:11:02.000Z | 2020-11-30T06:11:02.000Z | import os
from pathlib import Path
import numpy as np
from PIL import Image
import requests
from google.cloud import storage
import base64
from io import BytesIO
import uuid
__all__ = ['do', 'recaptcha_check']
def predict_and2jpg(model, cap):
    """Run the model on caption `cap` (e.g. "white hair yellow eyes") and
    return the generated image as a JPEG inside a BytesIO buffer rewound
    to position 0.  The caller owns the buffer: close it or use `with`."""
    raw, _ = model.predict(cap)
    pil_img = Image.fromarray(np.uint8(raw.numpy()))
    jpeg_buf = BytesIO()
    pil_img.save(jpeg_buf, format='JPEG')
    jpeg_buf.seek(0)
    return jpeg_buf
# import matplotlib.pyplot as plt
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# img = Image.open(buf)
# plt.imshow(img)
# plt.show()
gs_bucket_id = os.getenv('gs_bucket_id')

def upload_to_gs(client, img_file):
    """Upload `img_file` (a JPEG stream) to the configured Google Storage
    bucket under a random 8-hex-character name and return its public URL."""
    fname = f'{uuid.uuid4().hex[:8]}.jpg'
    target_blob = client.bucket(gs_bucket_id).blob(fname)
    target_blob.upload_from_file(img_file, content_type="image/jpeg")
    return f'https://storage.googleapis.com/{gs_bucket_id}/{fname}'
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# gs_client = storage.Client()
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = upload_to_gs(gs_client, buf)
# print(url)
imgur_client_id = os.getenv('imgur_client_id')

def upload_to_imgur(img_file):
    """Upload `img_file` to imgur anonymously and return the hosted link.

    Raises Exception when imgur reports a failed upload.
    """
    payload = base64.standard_b64encode(img_file.read())
    res = requests.post(
        "https://api.imgur.com/3/image",
        headers={'Authorization': f'Client-ID {imgur_client_id}'},
        data={'image': payload, 'type': 'base64'},
    ).json()
    if not res['success']:
        raise Exception("Failed to upload to imgur")
    return res["data"]["link"]
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = upload_to_imgur(buf)
# print(url)
def save_to_tmp(img_file):
    """Save img_file as a JPEG under ./temp_jpg/ and return the file path.

    The file gets a random 8-hex-character name.  The directory is created
    on first use.  (The old docstring said ./tmp_jpg/, which did not match
    the code — the code's ./temp_jpg/ is kept.)
    """
    img = Image.open(img_file)
    fname = f'{uuid.uuid4().hex[:8]}.jpg'
    # Previously this crashed with FileNotFoundError on a fresh checkout.
    os.makedirs('./temp_jpg', exist_ok=True)
    path = f'./temp_jpg/{fname}'
    img.save(path)
    return path
# from deep_t2i.model_anime_heads import ExportedModel
# from deep_t2i.inference_anime_heads import predict
# model = ExportedModel.from_pretrained('./anime_heads.pt')
# with predict_and2jpg(model, "white hair yellow eyes") as buf:
# url = save_to_tmp(buf)
# print(url)
img_server = os.getenv("img_server")
gs_client = storage.Client() if img_server=="gs" else None

def do(model, cap):
    """Generate an image for caption `cap`, publish it to the configured
    image host ("gs", "imgur", or the local ./temp_jpg fallback) and
    return the resulting URL/path."""
    with predict_and2jpg(model, cap) as jpeg_buf:
        if img_server == "gs":
            return upload_to_gs(gs_client, jpeg_buf)
        if img_server == "imgur":
            return upload_to_imgur(jpeg_buf)
        return save_to_tmp(jpeg_buf)
# Recaptcha check
recaptcha_secret = os.getenv('recaptcha_secret')

def recaptcha_check(token):
    """Return True only when Google confirms `token` is a valid reCAPTCHA
    response; a missing token short-circuits to False with no request."""
    if token is None:
        return False
    reply = requests.post(
        url="https://www.google.com/recaptcha/api/siteverify",
        data={'secret': recaptcha_secret, 'response': token},
    )
    return reply.json()['success']
| 34.424528 | 101 | 0.698822 | import os
from pathlib import Path
import numpy as np
from PIL import Image
import requests
from google.cloud import storage
import base64
from io import BytesIO
import uuid
__all__ = ['do', 'recaptcha_check']
def predict_and2jpg(model, cap):
img, _ = model.predict(cap)
img = Image.fromarray(np.uint8(img.numpy()))
buf = BytesIO()
img.save(buf, format='JPEG')
buf.seek(0)
return buf
gs_bucket_id = os.getenv('gs_bucket_id')
def upload_to_gs(client, img_file):
bucket = client.bucket(gs_bucket_id)
fname = f'{uuid.uuid4().hex[:8]}.jpg'
blob = bucket.blob(fname)
blob.upload_from_file(img_file, content_type="image/jpeg")
return f'https://storage.googleapis.com/{gs_bucket_id}/{fname}'
imgur_client_id = os.getenv('imgur_client_id')
def upload_to_imgur(img_file):
img = img_file.read()
img = base64.standard_b64encode(img)
url = "https://api.imgur.com/3/image"
data = {'image': img, 'type': 'base64'}
headers = { 'Authorization': f'Client-ID {imgur_client_id}' }
res = requests.post(url, headers=headers, data=data).json()
if res['success']: return res["data"]["link"]
else:
raise Exception("Failed to upload to imgur")
def save_to_tmp(img_file):
img = Image.open(img_file)
fname = f'{uuid.uuid4().hex[:8]}.jpg'
path = f'./temp_jpg/{fname}'
img.save(path)
return path
img_server = os.getenv("img_server")
gs_client = storage.Client() if img_server=="gs" else None
def do(model, cap):
with predict_and2jpg(model, cap) as buf:
if img_server=="gs":
url = upload_to_gs(gs_client, buf)
elif img_server=="imgur":
url = upload_to_imgur(buf)
else:
url = save_to_tmp(buf)
return url
recaptcha_secret = os.getenv('recaptcha_secret')
def recaptcha_check(token):
if token is None: return False
url = "https://www.google.com/recaptcha/api/siteverify"
data = {
'secret': recaptcha_secret,
'response': token,
}
r = requests.post(url=url, data=data)
return r.json()['success']
| true | true |
f72fce083693b057598af0c60439146c9ccc930a | 1,461 | py | Python | Task2D.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | Task2D.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | Task2D.py | dan7267/1a-flood-risk-project-93 | d95cee987f5673d637626e1804f719371a25daa8 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
def run():
    """Requirements for Task 2D: print the water level history recorded
    over the past two days for the monitoring station named 'Cam'."""
    # Build the full list of monitoring stations.
    stations = build_station_list()

    # Locate the station by name; bail out gracefully if it is absent.
    target_name = "Cam"
    station_cam = next((s for s in stations if s.name == target_name), None)
    if station_cam is None:
        print("Station {} could not be found".format(target_name))
        return

    print(station_cam)

    # Fetch the measurements from the last two days.
    days_back = 2
    dates, levels = fetch_measure_levels(
        station_cam.measure_id, dt=datetime.timedelta(days=days_back))

    # Print the level history, one (date, level) pair per line.
    for date, level in zip(dates, levels):
        print(date, level)
# Run the demonstration only when executed as a script, not on import.
if __name__ == "__main__":
    print("*** Task 2D: CUED Part IA Flood Warning System ***")
    run()
| 27.055556 | 75 | 0.659138 |
import datetime
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list
def run():
stations = build_station_list()
station_name = "Cam"
station_cam = None
for station in stations:
if station.name == station_name:
station_cam = station
break
if not station_cam:
print("Station {} could not be found".format(station_name))
return
print(station_cam)
dt = 2
dates, levels = fetch_measure_levels(
station_cam.measure_id, dt=datetime.timedelta(days=dt))
for date, level in zip(dates, levels):
print(date, level)
if __name__ == "__main__":
print("*** Task 2D: CUED Part IA Flood Warning System ***")
run()
| true | true |
f72fce4b9ca244110fc20c8066f330fd436dbac7 | 769 | py | Python | contacts/server/server.py | alfredoroblesa/contacts-tool | 7b9d9ddbaa3ac1f2fc1210aa11958043a79d2e05 | [
"MIT"
] | null | null | null | contacts/server/server.py | alfredoroblesa/contacts-tool | 7b9d9ddbaa3ac1f2fc1210aa11958043a79d2e05 | [
"MIT"
] | null | null | null | contacts/server/server.py | alfredoroblesa/contacts-tool | 7b9d9ddbaa3ac1f2fc1210aa11958043a79d2e05 | [
"MIT"
] | null | null | null | import os
import json
from flask import Flask, render_template
DATABASE_PATH = "../.contacts-store"

# Stored contact fields are untrusted input; escape them so they cannot
# inject markup/script into the rendered page.
from html import escape

# Read every contact record from the store and render the HTML table once,
# at import time — the page therefore reflects the store at server start-up.
file_names = os.listdir(DATABASE_PATH)
file_names.remove(".git")

_rows = ["<table>",
         "<tr><th>Contact</th><th>Last Name</th><th>Tlf</th><th>Email</th>"
         "<th>Job</th><th>Province</th></tr>"]  # header cells now inside a <tr>
for file_name in file_names:
    file_path = os.path.join(DATABASE_PATH, file_name)
    with open(file_path, 'r') as f:
        data = json.load(f)
    data['name'] = file_name  # the file name doubles as the contact name
    cells = (data['name'], data['last_name'], data['tlf'], data['email'],
             data['job'], data['province'])
    _rows.append("<tr>" + "".join(f"<td>{escape(str(cell))}</td>" for cell in cells)
                 + "</tr>")
_rows.append("</table>")  # the table was previously left unclosed
html = "".join(_rows)

# Create Flask app
server = Flask(__name__)

@server.route("/")
def contacts_table():
    """Serve the contacts table rendered at start-up."""
    return html
import json
from flask import Flask, render_template
DATABASE_PATH = "../.contacts-store"
file_names = os.listdir(DATABASE_PATH)
file_names.remove(".git")
html = "<table><th>Contact</th><th>Last Name</th><th>Tlf</th><th>Email</th><th>Job</th><th>Province</th>"
for file_name in file_names:
file_path = os.path.join(DATABASE_PATH, file_name)
with open(file_path, 'r') as f:
data = json.load(f)
data['name'] = file_name
html += f"<tr><td>{data['name']}</td><td>{data['last_name']}</td><td>{data['tlf']}</td><td>{data['email']}</td><td>{data['job']}</td><td>{data['province']}</td></tr>"
server = Flask(__name__)
@server.route("/")
def contacts_table():
return html | true | true |
f72fcf78d29e01d35333b5634ff87c068a1f35d6 | 13,176 | py | Python | Networks/4_layer_net_Parameter_optimization.py | Kohulan/Decimer-Python | 17373e02faedb28ba94742f61001bb3c6b015798 | [
"MIT"
] | 5 | 2019-07-24T14:18:07.000Z | 2021-11-08T00:35:55.000Z | Networks/4_layer_net_Parameter_optimization.py | Kohulan/Decimer-Python | 17373e02faedb28ba94742f61001bb3c6b015798 | [
"MIT"
] | null | null | null | Networks/4_layer_net_Parameter_optimization.py | Kohulan/Decimer-Python | 17373e02faedb28ba94742f61001bb3c6b015798 | [
"MIT"
] | 5 | 2020-09-16T13:01:31.000Z | 2022-01-24T06:26:06.000Z | '''
 * This Software is under the MIT License
 * Refer to LICENSE or https://opensource.org/licenses/MIT for more information
 * Written by Kohulan Rajan
 * © 2019
'''
# Grid search over the hyper-parameters of a 2-hidden-layer MLP that
# predicts bond counts from flattened molecule images.  Image batches are
# decompressed in parallel (multiprocessing + lz4) while training.
import tensorflow as tf
import os
import sys
import numpy as np
import matplotlib as mpl
import csv
mpl.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
from numpy import array
import pickle
import lz4.frame as lz
import multiprocessing
np.set_printoptions(threshold=np.nan)

# Pin the job to one GPU of the cluster node.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"

# Candidate neuron counts for the first and second hidden layers.
hidden_neurons_list_I = [2,4,8,16,32,64,128,512,1024,2048,4096]
hidden_neurons_list_II = [2,4,8,16,32,64,128,512,1024,2048,4096]
# Candidate mini-batch sizes.
batch_sizer_list = [500,1000]
# Candidate learning rates.
learning_rate_list = [0.001,0.003,0.005,0.007,0.009,0.01]

# One full training/evaluation run per hyper-parameter combination; each
# combination logs to its own file and saves its own plot.
for hidden_neurons_I in range(len(hidden_neurons_list_I)):
    for hidden_neurons_II in range(len(hidden_neurons_list_II)):
        for batch_sizer in range(len(batch_sizer_list)):
            for learning_rate_ in range(len(learning_rate_list)):
                # Unbuffered ('w', 0 — Python 2) per-run log file; stdout is
                # redirected into it so every print below lands in the log.
                f = open("/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]), 'w',0)
                sys.stdout = f
                print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Network Started")
                # ---- data input: labels first, then the image arrays ----
def label_data(is_test=False):
data_path = "train"
if is_test:
data_path = "test"
myFile = open('/Data/Potential'+data_path+'_labels.csv',"r")
labels = []
for row in myFile:
x = int(row.strip().split(",")[1])
labels.append(x)
myFile.close()
return np.asarray(labels)
y_train = label_data()
y_test = label_data(is_test=True)
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Labels loaded !!")
#Image array data
Train_Images = pickle.load( open("/Data/train_compressed.txt","rb"))
Test_Images = pickle.load( open("/Data/test_compressed.txt","rb"))
train_items = Train_Images.items()
test_items = Test_Images.items()
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Loading done! Train",len(train_items))
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Loading done! Test",len(test_items))
#one hot vector transformation
def one_hot(y, n_labels):
mat = np.zeros((len(y), n_labels))
for i, val in enumerate(y):
mat[i, val] = 1
return mat
# Parameters
learning_rate = learning_rate_list[learning_rate_]
training_epochs = 20
batch_size = batch_sizer_list[batch_sizer]
display_step = 1
testbatch_size = 1000
totaltrain_batch = len(train_items)/batch_size
totaltest_batch = len(test_items)/testbatch_size
# Network Parameters
n_hidden_1 = hidden_neurons_list_I[hidden_neurons_I] # 1st layer number of neurons
n_hidden_2 = hidden_neurons_list_II[hidden_neurons_II] # 1st layer number of neurons
n_input = 256*256 # Data input (Image shape: 1024 * 1024)
n_classes = 36 # Bond_Count
# tf Graph input
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
                # Model: two ReLU hidden layers followed by a linear output layer.
                def multilayer_perceptron(x):
                    """Forward pass; returns unscaled class logits for batch x."""
                    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
                    layer_1 = tf.nn.relu(layer_1)
                    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
                    layer_2 = tf.nn.relu(layer_2)
                    # Linear output; softmax is applied inside the loss op.
                    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
                    return out_layer
# Construct model
logits = multilayer_perceptron(X)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Initializing the variables
init = tf.global_variables_initializer()
# encoding labels to one_hot vectors
y_data_enc = one_hot(y_train, n_classes)
y_test_enc = one_hot(y_test, n_classes)
# Evaluate model (with test logits, for dropout to be disabled)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Evaluate the errors, mean,median and maximum errors
pred = tf.argmax(logits, 1)
pred_difference = tf.subtract(tf.argmax(Y, 1),tf.argmax(logits, 1))
mean_error=[]
median_error=[]
maximum_error=[]
#Initiating data for plots
loss_history = []
acc_history = []
valid_history = []
acc_valid_history = []
difference_history = []
test_loss_history = []
test_accuracy_history = []
print ("Data decompression for test batch started!")
#-----------------------------------------------------------------------------------------------------------------
print ("Total available threads for multiprocessing: ",multiprocessing.cpu_count())
                # Decompress one test image: lz4 -> comma-separated string ->
                # float32 vector, then invert intensities so pixel value 0 maps
                # to 1.0 and 255 maps to 0.0.  NOTE(review): Test_Images.values()[k]
                # indexes the values list directly — a Python 2 dict idiom.
                def decomp_test(k):
                    """Return the k-th test image as a normalised float32 vector."""
                    strarraytest = (lz.decompress(Test_Images.values()[k]))
                    floatarray_test = np.fromstring(strarraytest, dtype=float, sep=',')
                    floatarray32_test = np.array(floatarray_test).astype(np.float32)
                    encoded_array_test=(1.0-floatarray32_test/255.0)
                    return encoded_array_test
                # Worker pool used for parallel test-batch decompression.
                pool_test = multiprocessing.Pool()
                # Same decompression/normalisation for training images.
                def decomp_train(j):
                    """Return the j-th training image as a normalised float32 vector."""
                    strarray = (lz.decompress(Train_Images.values()[j]))
                    floatarray = np.fromstring(strarray, dtype=float, sep=',')
                    floatarray32 = np.array(floatarray).astype(np.float32)
                    encoded_array=(1.0-floatarray32/255.0)
                    return encoded_array
                # Separate worker pool for training-batch decompression.
                pool_train = multiprocessing.Pool()
                # ---------------- training / evaluation ----------------
                print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Training Started")
                # Grow GPU memory on demand instead of grabbing it all up front.
                config = tf.ConfigProto(allow_soft_placement=True)
                config.gpu_options.allow_growth = True
                config.gpu_options.allocator_type = 'BFC'
                with tf.Session(config=config) as sess:
                    sess.run(init)
                    # Training cycle: one pass over all batches per epoch.
                    for epoch in range(training_epochs):
                        avg_cost = 0
                        print ("total batch",totaltrain_batch)
                        counter=0
                        total_correct_preds = 0
                        Train_loss_per_batch = 0
                        # Loop over all training batches.
                        for l in range(totaltrain_batch):
                            print ("bathc",l)
                            print ("tests","count",counter,"batchsize",counter+batch_size)
                            # Decompress the next batch in the worker processes.
                            train_batchX = pool_train.map(decomp_train,range(counter,counter+batch_size))
                            batch_x=train_batchX
                            batch_y=y_data_enc[counter:(counter+len(train_batchX))]
                            # One optimisation step; c is the batch loss.
                            _, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,Y: batch_y})
                            Train_loss_per_batch += c
                            # Training accuracy on the same batch (after the update).
                            _, accu_train = sess.run([loss_op, accuracy], feed_dict={X: batch_x,Y: batch_y})
                            valid_history.append(accu_train)
                            total_correct_preds += accu_train
                            print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"train Accuracy:",accu_train)
                            print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),counter,"batch over")
                            counter += len(train_batchX)
                        # Mean per-batch training accuracy for this epoch.
                        validation_accuracy = total_correct_preds/totaltrain_batch
                        print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Train accuracy:",validation_accuracy)
                        acc_valid_history.append(validation_accuracy)
                        loss_history.append(Train_loss_per_batch/totaltrain_batch)
                        # Evaluate on the full test set after every epoch.
                        counter_test = 0
                        All_test_loss = 0
                        All_error = 0
                        test_accuracy_perbatch = 0
                        for test_set in range(totaltest_batch):
                            X_test = pool_test.map(decomp_test,range(counter_test,counter_test+testbatch_size))
                            Y_test = y_test_enc[counter_test:(counter_test+len(X_test))]
                            test_acc = accuracy.eval({X: X_test, Y: Y_test})
                            print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Accuracy:", test_acc)
                            test_accuracy_perbatch += test_acc
                            test_loss_batch,predict,error = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})
                            All_test_loss += test_loss_batch
                            All_error += error
                            #print(predict)
                            counter_test += len(X_test)
                        # Epoch statistics: error distribution plus loss/accuracy histories.
                        print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Test Accuracy:",test_accuracy_perbatch/totaltest_batch)
                        mean_error.append(np.absolute(np.mean(All_error/totaltest_batch)))
                        median_error.append(np.absolute(np.median(All_error/totaltest_batch)))
                        maximum_error.append(np.absolute(np.amax(All_error/totaltest_batch)))
                        test_loss_history.append(All_test_loss/totaltest_batch)
                        test_accuracy_history.append(test_accuracy_perbatch/totaltest_batch)
                        # Display logs per epoch step.
                        if epoch % display_step == 0:
                            print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Epoch:", '%04d' % (epoch+1))
                    print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Optimization Finished!")
                    print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Network completed")
                    f.close()
                    pool_train.close()
                    # Append this run's final results to the shared report file
                    # (stdout is redirected again, so the prints below go there).
                    file_append = open('/Results/Final_Report.txt' , 'a+')
                    sys.stdout = file_append
                    print("\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------\n")
                    print("Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))
                    print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Train accuracy:",validation_accuracy)
                    print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Test Accuracy:",test_accuracy_perbatch/totaltest_batch)
                    # Collect signed prediction errors over the whole test set.
                    counter_test_x = 0
                    prediction_difference = 0
                    for testing in range(totaltest_batch):
                        X_test = pool_test.map(decomp_test,range(counter_test_x,counter_test_x+testbatch_size))
                        Y_test = y_test_enc[counter_test_x:(counter_test_x+len(X_test))]
                        _, predict,prediction_difference_batch = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})
                        prediction_difference += prediction_difference_batch
                        counter_test_x += len(X_test)
                    prediction_window = np.absolute(prediction_difference)
                    pool_test.close()
                    # Accuracy within an absolute error window of 0..9 bonds.
                    for j in range(10):
                        count_error = 0
                        for i in prediction_window:
                            if i<=j:
                                count_error+=1
                        Window_accuracy = float(count_error)/len(test_items)*100
                        print("Currectly predicted bond count with error less than",j,"bonds, Accuracy ={:.2f}".format(Window_accuracy))
                    file_append.close()
                    # Plot loss, accuracy and error statistics for this run.
                    plt.subplot(3,1,1)
                    plt.plot(loss_history, '-o', label='Train Loss value')
                    plt.title('Training & Tesing Loss')
                    plt.xlabel('Epoch x Batches')
                    plt.ylabel('Loss Value')
                    plt.plot(test_loss_history, '-o', label='Test Loss value')
                    plt.xlabel('Epoch x Batches')
                    plt.ylabel('Loss Value')
                    plt.legend(ncol=2, loc='upper right')
                    plt.subplot(3,1,2)
                    plt.gca().set_ylim([0,1.0])
                    plt.plot(acc_valid_history, '-o', label='Train Accuracy value')
                    plt.plot(test_accuracy_history, '-o', label='Test Accuracy value')
                    #plt.plot(difference_history, '-o', label='Train-Test Accuracy')
                    plt.title('Train & Test Accuracy')
                    plt.xlabel('Batches')
                    plt.ylabel('Accuracy')
                    plt.legend(ncol=2, loc='lower right')
                    plt.subplot(3,1,3)
                    plt.plot(mean_error, '-o', label='Mean of error')
                    plt.plot(median_error, '-o', label='Median of error')
                    plt.plot(maximum_error, '-o', label='Maximum error')
                    plt.xlabel('Batches')
                    plt.ylabel('Error')
                    plt.legend(ncol=2, loc='lower right')
                    plt.gcf().set_size_inches(15, 30)
                    plt.savefig("/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.png".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))
                    plt.close()
import tensorflow as tf
import os
import sys
import numpy as np
import matplotlib as mpl
import csv
mpl.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
from numpy import array
import pickle
import lz4.frame as lz
import multiprocessing
np.set_printoptions(threshold=np.nan)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
hidden_neurons_list_I = [2,4,8,16,32,64,128,512,1024,2048,4096]
hidden_neurons_list_II = [2,4,8,16,32,64,128,512,1024,2048,4096]
batch_sizer_list = [500,1000]
learning_rate_list = [0.001,0.003,0.005,0.007,0.009,0.01]
for hidden_neurons_I in range(len(hidden_neurons_list_I)):
for hidden_neurons_II in range(len(hidden_neurons_list_II)):
for batch_sizer in range(len(batch_sizer_list)):
for learning_rate_ in range(len(learning_rate_list)):
f = open("/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]), 'w',0)
sys.stdout = f
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Network Started")
def label_data(is_test=False):
data_path = "train"
if is_test:
data_path = "test"
myFile = open('/Data/Potential'+data_path+'_labels.csv',"r")
labels = []
for row in myFile:
x = int(row.strip().split(",")[1])
labels.append(x)
myFile.close()
return np.asarray(labels)
y_train = label_data()
y_test = label_data(is_test=True)
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Labels loaded !!")
Train_Images = pickle.load( open("/Data/train_compressed.txt","rb"))
Test_Images = pickle.load( open("/Data/test_compressed.txt","rb"))
train_items = Train_Images.items()
test_items = Test_Images.items()
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Loading done! Train",len(train_items))
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Loading done! Test",len(test_items))
def one_hot(y, n_labels):
mat = np.zeros((len(y), n_labels))
for i, val in enumerate(y):
mat[i, val] = 1
return mat
learning_rate = learning_rate_list[learning_rate_]
training_epochs = 20
batch_size = batch_sizer_list[batch_sizer]
display_step = 1
testbatch_size = 1000
totaltrain_batch = len(train_items)/batch_size
totaltest_batch = len(test_items)/testbatch_size
n_hidden_1 = hidden_neurons_list_I[hidden_neurons_I]
n_hidden_2 = hidden_neurons_list_II[hidden_neurons_II]
n_input = 256*256
n_classes = 36
X = tf.placeholder("float", [None, n_input])
Y = tf.placeholder("float", [None, n_classes])
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
def multilayer_perceptron(x):
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
logits = multilayer_perceptron(X)
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
init = tf.global_variables_initializer()
y_data_enc = one_hot(y_train, n_classes)
y_test_enc = one_hot(y_test, n_classes)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
pred = tf.argmax(logits, 1)
pred_difference = tf.subtract(tf.argmax(Y, 1),tf.argmax(logits, 1))
mean_error=[]
median_error=[]
maximum_error=[]
loss_history = []
acc_history = []
valid_history = []
acc_valid_history = []
difference_history = []
test_loss_history = []
test_accuracy_history = []
print ("Data decompression for test batch started!")
print ("Total available threads for multiprocessing: ",multiprocessing.cpu_count())
def decomp_test(k):
strarraytest = (lz.decompress(Test_Images.values()[k]))
floatarray_test = np.fromstring(strarraytest, dtype=float, sep=',')
floatarray32_test = np.array(floatarray_test).astype(np.float32)
encoded_array_test=(1.0-floatarray32_test/255.0)
return encoded_array_test
pool_test = multiprocessing.Pool()
def decomp_train(j):
    """Decompress and normalise the j-th training image.

    ``Train_Images`` (mapping of id -> LZ-compressed CSV pixel string) and
    ``lz`` are defined earlier in the file.  Returns a float32 array with
    pixel intensities inverted and rescaled to [0, 1].
    """
    # BUG FIX: on Python 3, dict.values() returns a non-subscriptable view,
    # so it must be materialised before indexing (also valid on Python 2).
    strarray = lz.decompress(list(Train_Images.values())[j])
    # Text-mode parse of the comma-separated pixel string.
    floatarray = np.fromstring(strarray, dtype=float, sep=',')
    floatarray32 = floatarray.astype(np.float32)
    # Invert and rescale intensities to [0, 1].
    encoded_array = (1.0-floatarray32/255.0)
    return encoded_array
# Worker pool used to decompress training images in parallel.
pool_train = multiprocessing.Pool()
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Training Started")
# Session config: allow CPU fallback for unplaced ops, grow GPU memory on
# demand and use the best-fit-with-coalescing (BFC) allocator.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
config.gpu_options.allocator_type = 'BFC'
with tf.Session(config=config) as sess:
sess.run(init)
for epoch in range(training_epochs):
avg_cost = 0
print ("total batch",totaltrain_batch)
counter=0
total_correct_preds = 0
Train_loss_per_batch = 0
for l in range(totaltrain_batch):
print ("bathc",l)
print ("tests","count",counter,"batchsize",counter+batch_size)
train_batchX = pool_train.map(decomp_train,range(counter,counter+batch_size))
batch_x=train_batchX
batch_y=y_data_enc[counter:(counter+len(train_batchX))]
_, c = sess.run([train_op, loss_op], feed_dict={X: batch_x,Y: batch_y})
Train_loss_per_batch += c
_, accu_train = sess.run([loss_op, accuracy], feed_dict={X: batch_x,Y: batch_y})
valid_history.append(accu_train)
total_correct_preds += accu_train
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"train Accuracy:",accu_train)
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),counter,"batch over")
counter += len(train_batchX)
validation_accuracy = total_correct_preds/totaltrain_batch
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Train accuracy:",validation_accuracy)
acc_valid_history.append(validation_accuracy)
loss_history.append(Train_loss_per_batch/totaltrain_batch)
counter_test = 0
All_test_loss = 0
All_error = 0
test_accuracy_perbatch = 0
for test_set in range(totaltest_batch):
X_test = pool_test.map(decomp_test,range(counter_test,counter_test+testbatch_size))
Y_test = y_test_enc[counter_test:(counter_test+len(X_test))]
test_acc = accuracy.eval({X: X_test, Y: Y_test})
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Accuracy:", test_acc)
test_accuracy_perbatch += test_acc
test_loss_batch,predict,error = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})
All_test_loss += test_loss_batch
All_error += error
counter_test += len(X_test)
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Test Accuracy:",test_accuracy_perbatch/totaltest_batch)
mean_error.append(np.absolute(np.mean(All_error/totaltest_batch)))
median_error.append(np.absolute(np.median(All_error/totaltest_batch)))
maximum_error.append(np.absolute(np.amax(All_error/totaltest_batch)))
test_loss_history.append(All_test_loss/totaltest_batch)
test_accuracy_history.append(test_accuracy_perbatch/totaltest_batch)
if epoch % display_step == 0:
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Epoch:", '%04d' % (epoch+1))
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Optimization Finished!")
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Network completed")
f.close()
pool_train.close()
file_append = open('/Results/Final_Report.txt' , 'a+')
sys.stdout = file_append
print("\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------\n")
print("Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.txt".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Train accuracy:",validation_accuracy)
print (datetime.now().strftime('%Y/%m/%d %H:%M:%S'),"Final Test Accuracy:",test_accuracy_perbatch/totaltest_batch)
counter_test_x = 0
prediction_difference = 0
for testing in range(totaltest_batch):
X_test = pool_test.map(decomp_test,range(counter_test_x,counter_test_x+testbatch_size))
Y_test = y_test_enc[counter_test_x:(counter_test_x+len(X_test))]
_, predict,prediction_difference_batch = sess.run([loss_op,pred,pred_difference], feed_dict={X: X_test, Y: Y_test})
prediction_difference += prediction_difference_batch
counter_test_x += len(X_test)
prediction_window = np.absolute(prediction_difference)
pool_test.close()
for j in range(10):
count_error = 0
for i in prediction_window:
if i<=j:
count_error+=1
Window_accuracy = float(count_error)/len(test_items)*100
print("Currectly predicted bond count with error less than",j,"bonds, Accuracy ={:.2f}".format(Window_accuracy))
file_append.close()
# Summary figure: (1) loss curves, (2) accuracy curves, (3) error stats.
plt.subplot(3,1,1)
plt.plot(loss_history, '-o', label='Train Loss value')
# BUG FIX: corrected "Tesing" -> "Testing" in the plot title.
plt.title('Training & Testing Loss')
plt.xlabel('Epoch x Batches')
plt.ylabel('Loss Value')
plt.plot(test_loss_history, '-o', label='Test Loss value')
plt.xlabel('Epoch x Batches')
plt.ylabel('Loss Value')
plt.legend(ncol=2, loc='upper right')
plt.subplot(3,1,2)
plt.gca().set_ylim([0,1.0])
plt.plot(acc_valid_history, '-o', label='Train Accuracy value')
plt.plot(test_accuracy_history, '-o', label='Test Accuracy value')
plt.title('Train & Test Accuracy')
plt.xlabel('Batches')
plt.ylabel('Accuracy')
plt.legend(ncol=2, loc='lower right')
plt.subplot(3,1,3)
plt.plot(mean_error, '-o', label='Mean of error')
plt.plot(median_error, '-o', label='Median of error')
plt.plot(maximum_error, '-o', label='Maximum error')
plt.xlabel('Batches')
plt.ylabel('Error')
plt.legend(ncol=2, loc='lower right')
plt.gcf().set_size_inches(15, 30)
# File name encodes the hyper-parameter combination of this run (the
# *_list index variables come from enclosing loops outside this chunk).
plt.savefig("/Results/Batch Size_{}_learning_rate_{}_hidden_neurons_{}_x_{}.png".format(batch_sizer_list[batch_sizer],learning_rate_list[learning_rate_],hidden_neurons_list_I[hidden_neurons_I],hidden_neurons_list_II[hidden_neurons_II]))
plt.close()
f72fcf8390be1f9d3facd1c0666a534992e527a7 | 10,916 | py | Python | tensorflow_quantum/core/ops/batch_util_test.py | PyJedi/quantum | 3f4a3c320e048b8a8faf3a10339975d2d5366fb6 | [
"Apache-2.0"
] | 1 | 2020-06-01T01:28:36.000Z | 2020-06-01T01:28:36.000Z | tensorflow_quantum/core/ops/batch_util_test.py | PyJedi/quantum | 3f4a3c320e048b8a8faf3a10339975d2d5366fb6 | [
"Apache-2.0"
] | null | null | null | tensorflow_quantum/core/ops/batch_util_test.py | PyJedi/quantum | 3f4a3c320e048b8a8faf3a10339975d2d5366fb6 | [
"Apache-2.0"
] | 1 | 2020-06-07T01:28:01.000Z | 2020-06-07T01:28:01.000Z | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test parallel Cirq simulations."""
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.python import util
# Shared test fixtures: batch size, qubit count, Pauli string length and
# the symbol names used for parameterised circuits.
BATCH_SIZE = 12
N_QUBITS = 5
PAULI_LENGTH = 3
SYMBOLS = ['alpha', 'beta', 'gamma']
def _get_mixed_batch(qubits, symbols, size):
    """Build a batch of circuits/resolvers, half fixed and half symbolic."""
    fixed_circuits, fixed_resolvers = util.random_circuit_resolver_batch(
        qubits, size // 2)
    symbol_circuits, symbol_resolvers = \
        util.random_symbol_circuit_resolver_batch(qubits, symbols, size // 2)
    return (fixed_circuits + symbol_circuits,
            fixed_resolvers + symbol_resolvers)
def _pad_state(sim, state, n):
    """Extract the raw state array from a simulation result and right-pad
    its last axis to 2**n entries using the sentinel value -2."""
    if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
        state = state.final_state
    elif isinstance(sim, cirq.DensityMatrixSimulator):
        state = state.final_density_matrix
    pad_amount = (1 << n) - state.shape[-1]
    return np.pad(state, (0, pad_amount), 'constant', constant_values=-2)
def _expectation_helper(sim, circuit, params, op):
    """Reference expectation value of ``op`` computed directly with cirq.

    Dispatches on the simulator type: wavefunction simulators use
    expectation_from_wavefunction on the final state; density-matrix
    simulators sum per-term expectations over the PauliSum.  Returns
    NotImplemented for unsupported simulator types.
    """
    if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
        state = sim.simulate(circuit, params).final_state.astype(np.complex128)
        return [
            op.expectation_from_wavefunction(
                state,
                # Map each qubit (in sorted order) to its index in the state.
                dict(
                    zip(sorted(circuit.all_qubits()),
                        (j for j in range(len(circuit.all_qubits())))))).real
        ]
    if isinstance(sim, cirq.DensityMatrixSimulator):
        state = sim.simulate(circuit, params).final_density_matrix
        return [
            # Sum the expectation of every PauliString term in the op.
            sum(
                x._expectation_from_density_matrix_no_validation(
                    state,
                    dict(
                        zip(sorted(circuit.all_qubits()), (
                            j
                            for j in range(len(circuit.all_qubits()))))))
                for x in op)
        ]
    return NotImplemented
def _sample_helper(sim, state, n_qubits, n_samples):
    """Draw ``n_samples`` bitstring samples from a simulation result.

    Dispatches on the simulator type; returns NotImplemented for
    unsupported simulators.
    """
    qubit_indices = list(range(n_qubits))
    if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
        return cirq.sample_state_vector(
            state.final_state, qubit_indices, repetitions=n_samples)
    if isinstance(sim, cirq.DensityMatrixSimulator):
        return cirq.sample_density_matrix(
            state.final_density_matrix, qubit_indices, repetitions=n_samples)
    return NotImplemented
class BatchUtilTest(tf.test.TestCase, parameterized.TestCase):
    """Test cases for BatchUtils main functions.

    Each test is parameterized over both a density-matrix and a sparse
    (wavefunction) simulator; results are checked against direct cirq
    simulation via the module helpers above.
    """

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_batch_simulate_state(self, sim):
        """Test variable sized wavefunction output."""
        circuit_batch, resolver_batch = _get_mixed_batch(
            cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
        results = batch_util.batch_calculate_state(circuit_batch,
                                                   resolver_batch, sim)

        # Every returned state must match direct simulation, padded to
        # 2**N_QUBITS entries with the -2 sentinel.
        for circuit, resolver, result in zip(circuit_batch, resolver_batch,
                                             results):
            r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.complex64)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_batch_expectation(self, sim):
        """Test expectation."""
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        # The extra (9, 9) qubit never carries an op, exercising padding.
        circuit_batch, resolver_batch = _get_mixed_batch(
            qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)

        results = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], sim)

        for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
                                                 results, ops):
            r = _expectation_helper(sim, circuit, resolver, op)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.float32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_batch_sampled_expectation(self, sim):
        """Test expectation."""
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        circuit_batch, resolver_batch = _get_mixed_batch(
            qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
        n_samples = [[1000] for _ in range(len(ops))]

        results = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)

        # Sampled estimates are noisy, hence the loose tolerances below.
        for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
                                                 results, ops):
            r = _expectation_helper(sim, circuit, resolver, op)
            self.assertAllClose(r, result, rtol=1.0, atol=1e-1)

        self.assertDTypeEqual(results, np.float32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_batch_sample_basic(self, sim):
        """Test sampling."""
        n_samples = 1
        n_qubits = 8
        qubits = cirq.GridQubit.rect(1, n_qubits)
        # Deterministic circuit (Z on one half, X on the other), so the
        # single sample is fully predictable.
        circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),
                               *cirq.X.on_each(*qubits[n_qubits // 2:]))

        test_results = batch_util.batch_sample([circuit],
                                               [cirq.ParamResolver({})],
                                               n_samples, sim)

        state = sim.simulate(circuit, cirq.ParamResolver({}))
        expected_results = _sample_helper(sim, state, len(qubits), n_samples)

        self.assertAllEqual(expected_results, test_results[0])
        self.assertDTypeEqual(test_results, np.int32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_batch_sample(self, sim):
        """Test sampling."""
        n_samples = 2000 * (2**N_QUBITS)

        circuit_batch, resolver_batch = _get_mixed_batch(
            cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)

        results = batch_util.batch_sample(circuit_batch, resolver_batch,
                                          n_samples, sim)

        # Histogram the sampled bitstrings (read as big-endian integers).
        tfq_histograms = []
        for r in results:
            tfq_histograms.append(
                np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
                             range=(0, 2**N_QUBITS),
                             bins=2**N_QUBITS)[0])

        cirq_histograms = []
        for circuit, resolver in zip(circuit_batch, resolver_batch):
            state = sim.simulate(circuit, resolver)
            r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
            cirq_histograms.append(
                np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
                             range=(0, 2**N_QUBITS),
                             bins=2**N_QUBITS)[0])

        # Low KL divergence means the two sampled distributions agree.
        for a, b in zip(tfq_histograms, cirq_histograms):
            self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)

        self.assertDTypeEqual(results, np.int32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.sim.sparse_simulator.Simulator()
    }])
    def test_empty_circuits(self, sim):
        """Test functions with empty circuits."""
        # Common preparation
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
        n_samples = [[1000] for _ in range(len(ops))]
        # If there is no op on a qubit, the expectation answer is -2.0
        true_expectation = (-2.0,)

        # (1) Test expectation
        results = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], sim)

        for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
            self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.float32)

        # (2) Test sampled_expectation
        results = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)

        for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
            self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)

        self.assertDTypeEqual(results, np.float32)

        # (3) Test state
        results = batch_util.batch_calculate_state(circuit_batch,
                                                   resolver_batch, sim)

        for circuit, resolver, result in zip(circuit_batch, resolver_batch,
                                             results):
            r = _pad_state(sim, sim.simulate(circuit, resolver), 0)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.complex64)

        # (4) Test sampling
        n_samples = 2000 * (2**N_QUBITS)
        results = batch_util.batch_sample(circuit_batch, resolver_batch,
                                          n_samples, sim)

        for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):
            state = sim.simulate(circuit, resolver)
            r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
            self.assertAllClose(r, a, atol=1e-5)

        self.assertDTypeEqual(results, np.int32)
if __name__ == '__main__':
    # Run all TensorFlow test cases defined in this module.
    tf.test.main()
| 39.839416 | 80 | 0.600861 |
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.python import util
BATCH_SIZE = 12
N_QUBITS = 5
PAULI_LENGTH = 3
SYMBOLS = ['alpha', 'beta', 'gamma']
def _get_mixed_batch(qubits, symbols, size):
circuit1, resolver1 = util.random_circuit_resolver_batch(qubits, size // 2)
circuit2, resolver2 = util.random_symbol_circuit_resolver_batch(
qubits, symbols, size // 2)
return circuit1 + circuit2, resolver1 + resolver2
def _pad_state(sim, state, n):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
state = state.final_state
if isinstance(sim, cirq.DensityMatrixSimulator):
state = state.final_density_matrix
return np.pad(state, (0, (1 << n) - state.shape[-1]),
'constant',
constant_values=-2)
def _expectation_helper(sim, circuit, params, op):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
state = sim.simulate(circuit, params).final_state.astype(np.complex128)
return [
op.expectation_from_wavefunction(
state,
dict(
zip(sorted(circuit.all_qubits()),
(j for j in range(len(circuit.all_qubits())))))).real
]
if isinstance(sim, cirq.DensityMatrixSimulator):
state = sim.simulate(circuit, params).final_density_matrix
return [
sum(
x._expectation_from_density_matrix_no_validation(
state,
dict(
zip(sorted(circuit.all_qubits()), (
j
for j in range(len(circuit.all_qubits()))))))
for x in op)
]
return NotImplemented
def _sample_helper(sim, state, n_qubits, n_samples):
if isinstance(sim, cirq.sim.sparse_simulator.Simulator):
return cirq.sample_state_vector(state.final_state,
list(range(n_qubits)),
repetitions=n_samples)
if isinstance(sim, cirq.DensityMatrixSimulator):
return cirq.sample_density_matrix(state.final_density_matrix,
list(range(n_qubits)),
repetitions=n_samples)
return NotImplemented
class BatchUtilTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_simulate_state(self, sim):
circuit_batch, resolver_batch = _get_mixed_batch(
cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
results = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
for circuit, resolver, result in zip(circuit_batch, resolver_batch,
results):
r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.complex64)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_expectation(self, sim):
qubits = cirq.GridQubit.rect(1, N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
results = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], sim)
for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
results, ops):
r = _expectation_helper(sim, circuit, resolver, op)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.float32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sampled_expectation(self, sim):
qubits = cirq.GridQubit.rect(1, N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
n_samples = [[1000] for _ in range(len(ops))]
results = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)
for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
results, ops):
r = _expectation_helper(sim, circuit, resolver, op)
self.assertAllClose(r, result, rtol=1.0, atol=1e-1)
self.assertDTypeEqual(results, np.float32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sample_basic(self, sim):
n_samples = 1
n_qubits = 8
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),
*cirq.X.on_each(*qubits[n_qubits // 2:]))
test_results = batch_util.batch_sample([circuit],
[cirq.ParamResolver({})],
n_samples, sim)
state = sim.simulate(circuit, cirq.ParamResolver({}))
expected_results = _sample_helper(sim, state, len(qubits), n_samples)
self.assertAllEqual(expected_results, test_results[0])
self.assertDTypeEqual(test_results, np.int32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_batch_sample(self, sim):
n_samples = 2000 * (2**N_QUBITS)
circuit_batch, resolver_batch = _get_mixed_batch(
cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
results = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
tfq_histograms = []
for r in results:
tfq_histograms.append(
np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
range=(0, 2**N_QUBITS),
bins=2**N_QUBITS)[0])
cirq_histograms = []
for circuit, resolver in zip(circuit_batch, resolver_batch):
state = sim.simulate(circuit, resolver)
r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
cirq_histograms.append(
np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
range=(0, 2**N_QUBITS),
bins=2**N_QUBITS)[0])
for a, b in zip(tfq_histograms, cirq_histograms):
self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)
self.assertDTypeEqual(results, np.int32)
@parameterized.parameters([{
'sim': cirq.DensityMatrixSimulator()
}, {
'sim': cirq.sim.sparse_simulator.Simulator()
}])
def test_empty_circuits(self, sim):
resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
qubits = cirq.GridQubit.rect(1, N_QUBITS)
ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
n_samples = [[1000] for _ in range(len(ops))]
true_expectation = (-2.0,)
results = batch_util.batch_calculate_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], sim)
for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.float32)
results = batch_util.batch_calculate_sampled_expectation(
circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)
for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)
self.assertDTypeEqual(results, np.float32)
results = batch_util.batch_calculate_state(circuit_batch,
resolver_batch, sim)
for circuit, resolver, result in zip(circuit_batch, resolver_batch,
results):
r = _pad_state(sim, sim.simulate(circuit, resolver), 0)
self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)
self.assertDTypeEqual(results, np.complex64)
n_samples = 2000 * (2**N_QUBITS)
results = batch_util.batch_sample(circuit_batch, resolver_batch,
n_samples, sim)
for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):
state = sim.simulate(circuit, resolver)
r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
self.assertAllClose(r, a, atol=1e-5)
self.assertDTypeEqual(results, np.int32)
if __name__ == '__main__':
tf.test.main()
| true | true |
f72fcf8e506902e300f338f0524ddabfe7e97eb9 | 13,031 | py | Python | watertap/core/zero_order_properties.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/core/zero_order_properties.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | watertap/core/zero_order_properties.py | jalving/watertap | a89bd61deaaca9c30402727545e8223a276c93e6 | [
"BSD-3-Clause-LBNL"
] | null | null | null | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
"""
This module contains the general purpose property package for zero-order
unit models. Zero-order models do not track temperature and pressure, or any
form of energy flow.
"""
from idaes.core import (EnergyBalanceType,
MaterialBalanceType,
MaterialFlowBasis,
PhysicalParameterBlock,
StateBlock,
StateBlockData,
declare_process_block_class)
from idaes.core.components import Solvent, Solute
from idaes.core.phases import LiquidPhase
from idaes.core.util.misc import add_object_reference
from idaes.core.util.initialization import fix_state_vars, revert_state_vars
import idaes.logger as idaeslog
import idaes.core.util.scaling as iscale
from idaes.core.util.exceptions import ConfigurationError
from pyomo.environ import (Expression,
Param,
PositiveReals,
units as pyunits,
Var)
from pyomo.common.config import ConfigValue
# Some more inforation about this module
__author__ = "Andrew Lee"
# Set up logger
_log = idaeslog.getLogger(__name__)
@declare_process_block_class("WaterParameterBlock")
class WaterParameterBlockData(PhysicalParameterBlock):
    """
    Property Parameter Block Class

    Defines component and phase lists, along with base units and constant
    parameters, for the zero-order water property package.
    """
    CONFIG = PhysicalParameterBlock.CONFIG()

    CONFIG.declare('database', ConfigValue(
        description='An instance of a WaterTAP Database to use for parameters.'
    ))
    CONFIG.declare('water_source', ConfigValue(
        description=
        'Water source to use when looking up parameters from database.'))
    CONFIG.declare("solute_list", ConfigValue(
        domain=list,
        description="List of solute species of interest. If None, will use "
        "all species defined in the water_source provided."))

    def build(self):
        '''
        Callable method for Block construction.

        Registers the liquid phase, H2O solvent and solute components,
        then sets default density/viscosity values and scaling factors.
        '''
        super().build()

        self._state_block_class = WaterStateBlock

        self.Liq = LiquidPhase()

        self.H2O = Solvent()

        # Get component set from database if provided
        comp_set = None
        if self.config.database is not None:
            comp_set = self.config.database.get_solute_set(
                self.config.water_source)

        # Check definition of solute list
        solute_list = self.config.solute_list
        if solute_list is None:
            # No user-provided solute list, look up list from database
            if comp_set is None:
                # No solute list in database and none provided.
                raise ConfigurationError(
                    f"{self.name} no solute_list or database was defined. "
                    f"Users must provide at least one of these arguments.")
            else:
                solute_list = comp_set
        elif self.config.database is not None:
            # User provided custom list and database - check that all
            # components are supported (unknown components only log a note,
            # they do not raise).
            for j in solute_list:
                if j not in comp_set:
                    _log.info(f"{self.name} component {j} is not defined in "
                              f"the water_sources database file.")
        else:
            # User provided list but no database - assume they know what they
            # are doing
            pass

        for j in solute_list:
            self.add_component(str(j), Solute())

        # Define default value for mass density of solution
        self.dens_mass_default = 1000*pyunits.kg/pyunits.m**3

        # Define default value for dynamic viscosity of solution
        self.visc_d_default = 0.001*pyunits.kg/pyunits.m/pyunits.s

        # ---------------------------------------------------------------------
        # Set default scaling factors
        # NOTE: the parenthesised keys below are plain strings, not tuples;
        # per-component overrides use ("conc_mass_comp", j) tuple keys.
        self.default_scaling_factor = {
            ("flow_vol"): 1e3,
            ("conc_mass_comp"): 1e2}

    @classmethod
    def define_metadata(cls, obj):
        # Base units plus the supported properties and their construction
        # methods ('method': None means the property is a state variable).
        obj.add_default_units({
            'time': pyunits.s,
            'length': pyunits.m,
            'mass': pyunits.kg,
            'amount': pyunits.mol,
            'temperature': pyunits.K,
        })
        obj.add_properties(
            {'flow_mass_comp': {'method': None},
             'flow_vol': {'method': '_flow_vol'},
             'conc_mass_comp': {'method': '_conc_mass_comp'},
             'dens_mass': {'method': '_dens_mass'},
             'visc_d': {'method': '_visc_d'}})
class _WaterStateBlock(StateBlock):
    """
    This Class contains methods which should be applied to Property Blocks as a
    whole, rather than individual elements of indexed Property Blocks.
    """

    def initialize(blk,
                   state_args=None,
                   state_vars_fixed=False,
                   hold_state=False,
                   outlvl=idaeslog.NOTSET,
                   solver=None,
                   optarg=None):
        '''
        Initialization routine for property package.

        Keyword Arguments:
            state_args : Dictionary with initial guesses for the state vars
                         chosen. Note that if this method is triggered
                         through the control volume, and if initial guesses
                         were not provided at the unit model level, the
                         control volume passes the inlet values as initial
                         guess. The keys for the state_args dictionary are:

                         flow_mol_comp : value at which to initialize component
                                         flows (default=None)
                         pressure : value at which to initialize pressure
                                    (default=None)
                         temperature : value at which to initialize temperature
                                       (default=None)
            outlvl : sets output level of initialization routine
            state_vars_fixed: Flag to denote if state vars have already been
                              fixed.
                              - True - states have already been fixed and
                                       initialization does not need to worry
                                       about fixing and unfixing variables.
                              - False - states have not been fixed. The state
                                        block will deal with fixing/unfixing.
            optarg : solver options dictionary object (default=None, use
                     default solver options)
            solver : str indicating which solver to use during
                     initialization (default = None, use default solver)
            hold_state : flag indicating whether the initialization routine
                         should unfix any state variables fixed during
                         initialization (default=False).
                         - True - state variables are not unfixed, and
                                  a dict of returned containing flags for
                                  which states were fixed during
                                  initialization.
                         - False - state variables are unfixed after
                                   initialization by calling the
                                   release_state method

        Returns:
            If hold_states is True, returns a dict containing flags for
            which states were fixed during initialization.
        '''
        # For now, there are no constraints in the property package, so only
        # fix state variables if required
        init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")

        init_log.info('Initialization Complete.')

        if hold_state is True:
            flags = fix_state_vars(blk, state_args)
            return flags
        else:
            return

    def release_state(blk, flags, outlvl=idaeslog.NOTSET):
        '''
        Method to release state variables fixed during initialization.

        Keyword Arguments:
            flags : dict containing information of which state variables
                    were fixed during initialization, and should now be
                    unfixed. This dict is returned by initialize if
                    hold_state=True.
            outlvl : sets output level of logging
        '''
        init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="properties")

        if flags is None:
            return

        # Unfix state variables
        revert_state_vars(blk, flags)
        init_log.info('State Released.')
@declare_process_block_class("WaterStateBlock",
                             block_class=_WaterStateBlock)
class WaterStateBlockData(StateBlockData):
    """
    General purpose StateBlock for Zero-Order unit models.

    State is defined purely by component mass flowrates; temperature,
    pressure and energy flows are not tracked.
    """

    def build(self):
        super().build()

        # Create state variables
        self.flow_mass_comp = Var(self.component_list,
                                  initialize=1,
                                  domain=PositiveReals,
                                  doc='Mass flowrate of each component',
                                  units=pyunits.kg/pyunits.s)

    # -------------------------------------------------------------------------
    # Other properties
    def _conc_mass_comp(self):
        # conc_j = (mass flow of j / total mass flow) * mass density
        def rule_cmc(blk, j):
            return (blk.flow_mass_comp[j] /
                    sum(self.flow_mass_comp[k] for k in self.component_list) *
                    blk.dens_mass)
        self.conc_mass_comp = Expression(self.component_list,
                                         rule=rule_cmc)

    def _dens_mass(self):
        # Constant (but mutable) density taken from the parameter block.
        self.dens_mass = Param(initialize=self.params.dens_mass_default,
                               units=pyunits.kg/pyunits.m**3,
                               mutable=True,
                               doc="Mass density of flow")

    def _flow_vol(self):
        # Volumetric flow = total mass flow / mass density.
        self.flow_vol = Expression(
            expr=sum(self.flow_mass_comp[j] for j in self.component_list) /
            self.dens_mass)

    def _visc_d(self):
        self.visc_d = Param(initialize=self.params.visc_d_default,
                            units=pyunits.kg/pyunits.m/pyunits.s,
                            mutable=True,
                            doc="Dynamic viscosity of solution")

    def get_material_flow_terms(blk, p, j):
        return blk.flow_mass_comp[j]

    def get_enthalpy_flow_terms(blk, p):
        # Zero-order models do not track energy flows.
        raise NotImplementedError

    def get_material_density_terms(blk, p, j):
        return blk.conc_mass_comp[j]

    def get_energy_density_terms(blk, p):
        raise NotImplementedError

    def default_material_balance_type(self):
        return MaterialBalanceType.componentTotal

    def default_energy_balance_type(self):
        return EnergyBalanceType.none

    def define_state_vars(blk):
        return {"flow_mass_comp": blk.flow_mass_comp}

    def define_display_vars(blk):
        return {"Volumetric Flowrate": blk.flow_vol,
                "Mass Concentration": blk.conc_mass_comp}

    def get_material_flow_basis(blk):
        return MaterialFlowBasis.mass

    def calculate_scaling_factors(self):
        # Get default scale factors and do calculations from base classes
        super().calculate_scaling_factors()

        d_sf_Q = self.params.default_scaling_factor["flow_vol"]
        d_sf_c = self.params.default_scaling_factor["conc_mass_comp"]

        for j, v in self.flow_mass_comp.items():
            if iscale.get_scaling_factor(v) is None:
                iscale.set_scaling_factor(v, d_sf_Q*d_sf_c)

        if self.is_property_constructed("flow_vol"):
            if iscale.get_scaling_factor(self.flow_vol) is None:
                iscale.set_scaling_factor(self.flow_vol, d_sf_Q)

        if self.is_property_constructed("conc_mass_comp"):
            for j, v in self.conc_mass_comp.items():
                sf_c = iscale.get_scaling_factor(self.conc_mass_comp[j])
                if sf_c is None:
                    # Fall back to a per-component default, then to the
                    # package-wide default scaling factor.
                    try:
                        sf_c = self.params.default_scaling_factor[
                            ("conc_mass_comp", j)]
                    except KeyError:
                        sf_c = d_sf_c
                    iscale.set_scaling_factor(self.conc_mass_comp[j], sf_c)
| 39.728659 | 81 | 0.578927 | :
raise NotImplementedError
def get_material_density_terms(blk, p, j):
return blk.conc_mass_comp[j]
def get_energy_density_terms(blk, p):
raise NotImplementedError
def default_material_balance_type(self):
return MaterialBalanceType.componentTotal
def default_energy_balance_type(self):
return EnergyBalanceType.none
def define_state_vars(blk):
return {"flow_mass_comp": blk.flow_mass_comp}
def define_display_vars(blk):
return {"Volumetric Flowrate": blk.flow_vol,
"Mass Concentration": blk.conc_mass_comp}
def get_material_flow_basis(blk):
return MaterialFlowBasis.mass
def calculate_scaling_factors(self):
super().calculate_scaling_factors()
d_sf_Q = self.params.default_scaling_factor["flow_vol"]
d_sf_c = self.params.default_scaling_factor["conc_mass_comp"]
for j, v in self.flow_mass_comp.items():
if iscale.get_scaling_factor(v) is None:
iscale.set_scaling_factor(v, d_sf_Q*d_sf_c)
if self.is_property_constructed("flow_vol"):
if iscale.get_scaling_factor(self.flow_vol) is None:
iscale.set_scaling_factor(self.flow_vol, d_sf_Q)
if self.is_property_constructed("conc_mass_comp"):
for j, v in self.conc_mass_comp.items():
sf_c = iscale.get_scaling_factor(self.conc_mass_comp[j])
if sf_c is None:
try:
sf_c = self.params.default_scaling_factor[
("conc_mass_comp", j)]
except KeyError:
sf_c = d_sf_c
iscale.set_scaling_factor(self.conc_mass_comp[j], sf_c)
| true | true |
f72fcfcd3dc525e47916c5740040252f6d957d98 | 5,279 | py | Python | py/jupyterlite/src/jupyterlite/config.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | null | null | null | py/jupyterlite/src/jupyterlite/config.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | null | null | null | py/jupyterlite/src/jupyterlite/config.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | null | null | null | """an observable configuration object for the JupyterLite lifecycle
.. todo::
Move to a canonical JSON schema?
"""
import os
from pathlib import Path
from typing import Optional as _Optional
from typing import Text as _Text
from typing import Tuple as _Tuple
from traitlets import CInt, Tuple, Unicode, default
from traitlets.config import LoggingConfigurable
from . import constants as C
from .trait_types import CPath, TypedTuple
class LiteBuildConfig(LoggingConfigurable):
    """the description of a JupyterLite build

    This is most likely to be configured:

    - from environment variables
    - in a `pyproject.toml`
    - from the command line

    With direct instantiation a distant last place.

    This is conceptually similar in scale to `jupyter_server_config.json`, and will
    piggy-back off of the `{sys.prefix}/share/jupyter_{notebook,server}_config.d/`
    loader paths
    """

    disable_addons: _Tuple[_Text] = TypedTuple(
        Unicode(),
        help=("skip loading `entry_point` for these addons. TODO: should be a dict"),
    ).tag(config=True)

    apps: _Tuple[_Text] = TypedTuple(
        Unicode(),
        help=(
            f"""the Lite apps: currently {C.JUPYTERLITE_APPS}. """
            f"""Required: {C.JUPYTERLITE_APPS_REQUIRED}"""
        ),
    ).tag(config=True)

    app_archive: Path = CPath(
        help=("The app archive to use. env: JUPYTERLITE_APP_ARCHIVE")
    ).tag(config=True)

    lite_dir: Path = CPath(
        help=("The root folder of a JupyterLite project. env: JUPYTERLITE_DIR")
    ).tag(config=True)

    output_dir: Path = CPath(
        help=("Where to build the JupyterLite site. env: JUPYTERLITE_OUTPUT_DIR")
    ).tag(config=True)

    output_archive: Path = CPath(
        help=("Archive to create. env: JUPYTERLITE_OUTPUT_ARCHIVE")
    ).tag(config=True)

    files: _Tuple[Path] = TypedTuple(
        CPath(), help="Files to add and index as Jupyter Contents"
    ).tag(config=True)

    overrides: _Tuple[_Text] = TypedTuple(
        CPath(), help=("Specific overrides.json to include")
    ).tag(config=True)

    # serving
    port: int = CInt(
        help=(
            "[serve] the port to (insecurely) expose on http://127.0.0.1."
            " env: JUPYTERLITE_PORT"
        )
    ).tag(config=True)

    base_url: str = Unicode(
        help=("[serve] the prefix to use." " env: JUPYTERLITE_BASE_URL")
    ).tag(config=True)

    # patterns
    ignore_files: _Tuple[_Text] = Tuple(
        help="Path patterns that should never be included"
    ).tag(config=True)

    source_date_epoch: _Optional[int] = CInt(
        allow_none=True,
        min=1,
        help="Trigger reproducible builds, clamping timestamps to this value",
    ).tag(config=True)

    @default("apps")
    def _default_apps(self):
        return C.JUPYTERLITE_APPS

    @default("disable_addons")
    def _default_disable_addons(self):
        """the addons that are disabled by default."""
        return []

    @default("output_dir")
    def _default_output_dir(self):
        # Environment variable wins; otherwise build under the lite dir.
        return Path(
            os.environ.get("JUPYTERLITE_OUTPUT_DIR")
            or self.lite_dir / C.DEFAULT_OUTPUT_DIR
        )

    @default("lite_dir")
    def _default_lite_dir(self):
        return Path(os.environ.get("JUPYTERLITE_DIR", Path.cwd()))

    @default("files")
    def _default_files(self):
        # Only index a conventional `files/` folder if it actually exists.
        lite_files = self.lite_dir / "files"
        if lite_files.is_dir():
            return [lite_files]
        return []

    @default("overrides")
    def _default_overrides(self):
        """collect overrides.json from the project root and each app folder."""
        all_overrides = []
        for app in [None, *self.apps]:
            app_dir = self.lite_dir / app if app else self.lite_dir
            overrides_json = app_dir / C.OVERRIDES_JSON
            if overrides_json.exists():
                all_overrides += [overrides_json]
        return all_overrides

    @default("ignore_files")
    def _default_ignore_files(self):
        # These are regular-expression fragments. Raw strings are required
        # for the ``\.`` escapes: in a plain string ``"\."`` is an invalid
        # escape sequence (SyntaxWarning on Python >= 3.12, slated to become
        # a SyntaxError). The resulting values are unchanged.
        return [
            r".*\.pyc",
            r"/\.git/",
            r"/\.gitignore",
            r"/\.ipynb_checkpoints/",
            "/build/",
            "/lib/",
            "/dist/",
            ".*doit.db",
            "/node_modules/",
            "/envs/",
            "/venvs/",
            r"/\.env",
            C.JUPYTERLITE_JSON.replace(".", "\\."),
            C.JUPYTERLITE_IPYNB.replace(".", "\\."),
            "untitled.*",
            "Untitled.*",
            f"/{self.output_dir.name}/",
        ]

    @default("app_archive")
    def _default_app_archive(self):
        return Path(os.environ.get("JUPYTERLITE_APP_ARCHIVE") or C.DEFAULT_APP_ARCHIVE)

    @default("output_archive")
    def _default_output_archive(self):
        return Path(
            os.environ.get("JUPYTERLITE_OUTPUT_ARCHIVE")
            or self.output_dir / f"{self.lite_dir.name}-jupyterlite.tgz"
        )

    @default("source_date_epoch")
    def _default_source_date_epoch(self):
        # Honor the reproducible-build convention; None disables clamping.
        if C.SOURCE_DATE_EPOCH not in os.environ:
            return None
        sde = int(os.environ[C.SOURCE_DATE_EPOCH])
        return sde

    @default("port")
    def _default_port(self):
        return int(os.environ.get("JUPYTERLITE_PORT", 8000))

    @default("base_url")
    def _default_base_url(self):
        return os.environ.get("JUPYTERLITE_BASE_URL", "/")
| 29.327778 | 87 | 0.614321 | import os
from pathlib import Path
from typing import Optional as _Optional
from typing import Text as _Text
from typing import Tuple as _Tuple
from traitlets import CInt, Tuple, Unicode, default
from traitlets.config import LoggingConfigurable
from . import constants as C
from .trait_types import CPath, TypedTuple
class LiteBuildConfig(LoggingConfigurable):
disable_addons: _Tuple[_Text] = TypedTuple(
Unicode(),
help=("skip loading `entry_point` for these addons. TODO: should be a dict"),
).tag(config=True)
apps: _Tuple[_Text] = TypedTuple(
Unicode(),
help=(
f"""the Lite apps: currently {C.JUPYTERLITE_APPS}. """
f"""Required: {C.JUPYTERLITE_APPS_REQUIRED}"""
),
).tag(config=True)
app_archive: Path = CPath(
help=("The app archive to use. env: JUPYTERLITE_APP_ARCHIVE")
).tag(config=True)
lite_dir: Path = CPath(
help=("The root folder of a JupyterLite project. env: JUPYTERLITE_DIR")
).tag(config=True)
output_dir: Path = CPath(
help=("Where to build the JupyterLite site. env: JUPYTERLITE_OUTPUT_DIR")
).tag(config=True)
output_archive: Path = CPath(
help=("Archive to create. env: JUPYTERLITE_OUTPUT_ARCHIVE")
).tag(config=True)
files: _Tuple[Path] = TypedTuple(
CPath(), help="Files to add and index as Jupyter Contents"
).tag(config=True)
overrides: _Tuple[_Text] = TypedTuple(
CPath(), help=("Specific overrides.json to include")
).tag(config=True)
port: int = CInt(
help=(
"[serve] the port to (insecurely) expose on http://127.0.0.1."
" env: JUPYTERLITE_PORT"
)
).tag(config=True)
base_url: str = Unicode(
help=("[serve] the prefix to use." " env: JUPYTERLITE_BASE_URL")
).tag(config=True)
ignore_files: _Tuple[_Text] = Tuple(
help="Path patterns that should never be included"
).tag(config=True)
source_date_epoch: _Optional[int] = CInt(
allow_none=True,
min=1,
help="Trigger reproducible builds, clamping timestamps to this value",
).tag(config=True)
@default("apps")
def _default_apps(self):
return C.JUPYTERLITE_APPS
@default("disable_addons")
def _default_disable_addons(self):
return []
@default("output_dir")
def _default_output_dir(self):
return Path(
os.environ.get("JUPYTERLITE_OUTPUT_DIR")
or self.lite_dir / C.DEFAULT_OUTPUT_DIR
)
@default("lite_dir")
def _default_lite_dir(self):
return Path(os.environ.get("JUPYTERLITE_DIR", Path.cwd()))
@default("files")
def _default_files(self):
lite_files = self.lite_dir / "files"
if lite_files.is_dir():
return [lite_files]
return []
@default("overrides")
def _default_overrides(self):
all_overrides = []
for app in [None, *self.apps]:
app_dir = self.lite_dir / app if app else self.lite_dir
overrides_json = app_dir / C.OVERRIDES_JSON
if overrides_json.exists():
all_overrides += [overrides_json]
return all_overrides
@default("ignore_files")
def _default_ignore_files(self):
return [
".*\.pyc",
"/\.git/",
"/\.gitignore",
"/\.ipynb_checkpoints/",
"/build/",
"/lib/",
"/dist/",
".*doit.db",
"/node_modules/",
"/envs/",
"/venvs/",
"/\.env",
C.JUPYTERLITE_JSON.replace(".", "\\."),
C.JUPYTERLITE_IPYNB.replace(".", "\\."),
"untitled.*",
"Untitled.*",
f"/{self.output_dir.name}/",
]
@default("app_archive")
def _default_app_archive(self):
return Path(os.environ.get("JUPYTERLITE_APP_ARCHIVE") or C.DEFAULT_APP_ARCHIVE)
@default("output_archive")
def _default_output_archive(self):
return Path(
os.environ.get("JUPYTERLITE_OUTPUT_ARCHIVE")
or self.output_dir / f"{self.lite_dir.name}-jupyterlite.tgz"
)
@default("source_date_epoch")
def _default_source_date_epoch(self):
if C.SOURCE_DATE_EPOCH not in os.environ:
return None
sde = int(os.environ[C.SOURCE_DATE_EPOCH])
return sde
@default("port")
def _default_port(self):
return int(os.environ.get("JUPYTERLITE_PORT", 8000))
@default("base_url")
def _default_base_url(self):
return os.environ.get("JUPYTERLITE_BASE_URL", "/")
| true | true |
f72fcfd6c1a73e3ebdb2254eb93485dc7e9e2ac2 | 10,495 | py | Python | tests/test_expressions.py | thorag76/mappyfile | 51ae914cb6282549b73cde684cbc54e213c74d4a | [
"MIT"
] | 48 | 2017-02-07T23:37:37.000Z | 2021-12-28T12:56:37.000Z | tests/test_expressions.py | thorag76/mappyfile | 51ae914cb6282549b73cde684cbc54e213c74d4a | [
"MIT"
] | 135 | 2017-03-16T08:54:59.000Z | 2022-03-30T20:00:22.000Z | tests/test_expressions.py | thorag76/mappyfile | 51ae914cb6282549b73cde684cbc54e213c74d4a | [
"MIT"
] | 23 | 2017-01-31T08:46:48.000Z | 2021-07-08T15:28:49.000Z | # -*- coding: utf-8 -*-
import logging
import json
import inspect
import pytest
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
def output(s):
    """Parse *s* as a Mapfile fragment, transform it to a dict, and
    pretty-print it back to a single-line string."""
    parser = Parser()
    transformer = MapfileToDict(include_position=True)
    # Log the calling test's name so failures are traceable in the log.
    # https://stackoverflow.com/questions/900392/getting-the-caller-function-name-inside-another-function-in-python
    logging.info(inspect.stack()[1][3])
    tree = parser.parse(s)
    logging.debug(tree.pretty())
    d = transformer.transform(tree)
    logging.debug(json.dumps(d, indent=4))
    printer = PrettyPrinter(indent=0, newlinechar=" ", quote="'")
    result = printer.pprint(d)
    logging.debug(result)
    return result
def check_result(s):
    """Round-trip *s* through :func:`output` and assert it is unchanged.

    On mismatch, both the input and the round-tripped text are logged
    before raising, so the difference is visible in the test log.
    """
    s2 = output(s)
    if s != s2:
        # Previously this logging lived in an ``except AssertionError``
        # handler; if an AssertionError ever escaped output() itself,
        # ``s2`` would be unbound and the handler would raise NameError,
        # masking the real failure. Compare explicitly instead.
        logging.info(s)
        logging.info(s2)
        raise AssertionError("pretty-printed output differs from input")
def test_class_expression1():
s = '''
CLASS
TEXT ([area])
END
'''
exp = "CLASS TEXT ([area]) END"
assert(output(s) == exp)
def test_class_expression2():
r"""
shp2img -m C:\Temp\msautotest\query\text.tmp.map -l text_test002 -o c:\temp\tmp_onl0lk.png
"""
s = '''
CLASS
TEXT ("[area]")
END
'''
exp = 'CLASS TEXT ("[area]") END'
assert(output(s) == exp)
def test_complex_class_expression():
s = '''
CLASS
TEXT ("Area is: " + tostring([area],"%.2f"))
END
'''
exp = '''CLASS TEXT ("Area is: " + (tostring([area],"%.2f"))) END'''
assert(output(s) == exp)
def test_or_expressions():
    """Both the ``OR`` keyword and the ``||`` symbol normalise to ``OR``.

    See http://www.mapserver.org/mapfile/expressions.html#expressions
    """
    exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) OR ( "[style_class]" = "20" ) ) END'

    keyword_form = '''
    CLASS
        EXPRESSION ("[style_class]" = "10" OR "[style_class]" = "20")
    END
    '''
    assert output(keyword_form) == exp

    symbol_form = '''
    CLASS
        EXPRESSION ("[style_class]" = "10" || "[style_class]" = "20")
    END
    '''
    assert output(symbol_form) == exp
def test_and_expressions():
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" AND "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) AND ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" && "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) AND ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
def test_not_expressions():
s = '''
CLASS
EXPRESSION NOT("[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION NOT ( "[style_class]" = "20" ) END'
assert(output(s) == exp)
s = '''
CLASS
EXPRESSION !("[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION NOT ( "[style_class]" = "20" ) END'
assert(output(s) == exp)
def test_runtime_expression():
s = """
CLASS
EXPRESSION ( [EPPL_Q100_] = %eppl% )
END
"""
exp = "CLASS EXPRESSION ( [EPPL_Q100_] = %eppl% ) END"
# print(output(s))
assert(output(s) == exp)
def test_ne_comparison():
"""
IS NOT is not valid
NE (Not Equals) should be used instead
"""
s = """
CLASS
# EXPRESSION ( "[building]" IS NOT NULL) # incorrect syntax
EXPRESSION ( "[building]" NE NULL)
END
"""
exp = 'CLASS EXPRESSION ( "[building]" NE NULL ) END'
assert(output(s) == exp)
def test_eq_comparison():
"""
Case is not changed for comparison (EQ/eq stay the same)
Uses Earley
"""
s = """
CLASS
EXPRESSION ( "[building]" eq NULL)
END
"""
exp = 'CLASS EXPRESSION ( "[building]" eq NULL ) END'
# print(output(s))
assert(output(s) == exp)
def test_expression():
"""
Addressed in issue #27, now parses successfully.
"""
s = """
CLASS
EXPRESSION ('[construct]' ~* /Br.*$/)
STYLE
ANGLE 360
END
END
"""
exp = "CLASS EXPRESSION ( '[construct]' ~* /Br.*$/ ) STYLE ANGLE 360 END END"
assert(output(s) == exp)
def test_list_expression():
"""
See issue #27
"""
s = """
CLASS
EXPRESSION /NS_Bahn|NS_BahnAuto/
END
"""
exp = "CLASS EXPRESSION /NS_Bahn|NS_BahnAuto/ END"
assert(output(s) == exp)
def test_numerical_operator_ge_expression():
s = """
CLASS
EXPRESSION ([power] ge 10000)
END
"""
exp = "CLASS EXPRESSION ( [power] ge 10000 ) END"
assert(output(s) == exp)
def test_numerical_operator_gt_expression():
s = """
CLASS
EXPRESSION ([power] gt 10000)
END
"""
exp = "CLASS EXPRESSION ( [power] gt 10000 ) END"
assert(output(s) == exp)
def test_numerical_operator_le_expression():
s = """
CLASS
EXPRESSION ([power] le 100)
END
"""
exp = "CLASS EXPRESSION ( [power] le 100 ) END"
assert(output(s) == exp)
def test_numerical_operator_lt_expression():
s = """
CLASS
EXPRESSION ([power] lt 100)
END
"""
exp = "CLASS EXPRESSION ( [power] lt 100 ) END"
assert(output(s) == exp)
def test_divide():
"""
Not sure if these should be in brackets or not
http://mapserver.org/mapfile/expressions.html
Implies with brackets will return a boolean value
and without will return a numeric value
"""
s = """
CLASS
EXPRESSION ([field1] / [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] / [field2]) END"
assert(output(s) == exp)
def test_multiply():
s = """
CLASS
EXPRESSION ([field1] * [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] * [field2]) END"
assert(output(s) == exp)
def test_negation():
"""
TODO - check the exact syntax for this
"""
s = """
CLASS
EXPRESSION (-[field1])
END
"""
exp = "CLASS EXPRESSION (-[field1]) END"
assert(output(s) == exp)
def test_pointless_plus():
# Based on test_negation
s = """
CLASS
EXPRESSION (+[field1])
END
"""
exp = "CLASS EXPRESSION ([field1]) END"
assert(output(s) == exp)
def test_power():
s = """
CLASS
EXPRESSION ([field1] ^ [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] ^ [field2]) END"
assert(output(s) == exp)
def test_divide_expression():
"""
http://mapserver.org/mapfile/expressions.html
Also - * and ^
"""
s = """
CLASS
EXPRESSION ([field1] / [field2] > 0.1)
END
"""
exp = "CLASS EXPRESSION ( [field1] / [field2] > 0.1 ) END"
assert(output(s) == exp)
def test_modulo_expression():
"""
Not currently documented at http://mapserver.org/mapfile/expressions.html
"""
s = """
CLASS
EXPRESSION ( ([height] % 50) = 0 )
END
"""
exp = "CLASS EXPRESSION ( ( [height] % 50 ) = 0 ) END"
assert(output(s) == exp)
def test_escaped_string():
"""
http://mapserver.org/mapfile/expressions.html#quotes-escaping-in-strings
Starting with MapServer 6.0 you don't need to escape single quotes within double quoted strings
and you don't need to escape double quotes within single quoted strings
"""
s = r"""
CLASS
EXPRESSION "National \"hero\" statue"
END
"""
exp = """CLASS EXPRESSION 'National \\"hero\\" statue' END"""
assert(output(s) == exp)
def test_list_expression_alt():
"""
See issue #38
http://mapserver.org/mapfile/expressions.html#list-expressions
These expressions are much more performant in MapServer
List expressions do not support quote escaping, or attribute values that contain a comma in them.
To activate them enclose a comma separated list of values between {}, without adding quotes
or extra spaces.
"""
s = """
CLASS
EXPRESSION {2_Klass,Rte2etr}
END
"""
exp = "CLASS EXPRESSION {2_Klass,Rte2etr} END"
assert(output(s) == exp)
s = """
CLASS
EXPRESSION {2_Klass,class with space}
END
"""
exp = "CLASS EXPRESSION {2_Klass,class with space} END"
assert(output(s) == exp)
def test_class_expression_oddname():
s = '''
CLASS
TEXT ([area:ian])
END
'''
exp = "CLASS TEXT ([area:ian]) END"
assert(output(s) == exp)
def test_class_not_expression_brackets():
"""
See issue #85 - coding of NOT logical expressions #85
Each expression should be bracketed independently and any NOT
clause should be outside the brackets
"""
s = '''
CLASS
EXPRESSION (("[TIME]" eq 'NOW') AND NOT ("[TYPE]" ~ "(something|completely|different)"))
END
'''
exp = '''CLASS EXPRESSION ( ( "[TIME]" eq 'NOW' ) AND NOT ( "[TYPE]" ~ "(something|completely|different)" ) ) END'''
print(output(s))
assert(output(s) == exp)
def test_class_not_expression_no_brackets():
"""
See issue #85 - coding of NOT logical expressions #85
This parses successfully in MapServer but not in mappyfile
"""
s = '''
CLASS
EXPRESSION ("[TIME]" eq 'NOW' AND NOT "[TYPE]" ~ "(something|completely|different)")
END
'''
exp = '''CLASS EXPRESSION ( ( "[TIME]" eq 'NOW' ) AND NOT ( "[TYPE]" ~ "(something|completely|different)" ) ) END'''
assert(output(s) == exp)
def test_unquoted_unicode_string():
"""
See pull request #92 - French unquoted string
"""
s = '''
CLASS
EXPRESSION {Aérodrome,Aéroport,Héliport,Base spatiale}
END
'''
exp = u'''CLASS EXPRESSION {Aérodrome,Aéroport,Héliport,Base spatiale} END'''
assert(output(s) == exp)
def test_list_with_apostrophe():
"""
See https://github.com/geographika/mappyfile/issues/120
"""
s = '''
CLASS
EXPRESSION {bla,d'apostrophe}
END
'''
exp = u'''CLASS EXPRESSION {bla,d'apostrophe} END'''
assert(output(s) == exp)
def run_tests():
    r"""
    Need to comment out the following line in C:\VirtualEnvs\mappyfile\Lib\site-packages\pep8.py
    #stdin_get_value = sys.stdin.read
    Or get AttributeError: '_ReplInput' object has no attribute 'read'
    """
    # Run only this module's tests via pytest's programmatic entry point.
    pytest.main(["tests/test_expressions.py"])
if __name__ == '__main__':
    # When executed as a script, run a single test with debug logging;
    # use run_tests() (or plain pytest) for the full suite.
    logging.basicConfig(level=logging.DEBUG)
    test_list_with_apostrophe()
    # run_tests()
    print("Done!")
| 22.765727 | 120 | 0.577608 |
import logging
import json
import inspect
import pytest
from mappyfile.parser import Parser
from mappyfile.pprint import PrettyPrinter
from mappyfile.transformer import MapfileToDict
def output(s):
p = Parser()
m = MapfileToDict(include_position=True)
logging.info(inspect.stack()[1][3])
ast = p.parse(s)
logging.debug(ast.pretty())
d = m.transform(ast)
logging.debug(json.dumps(d, indent=4))
pp = PrettyPrinter(indent=0, newlinechar=" ", quote="'")
s = pp.pprint(d)
logging.debug(s)
return s
def check_result(s):
try:
s2 = output(s)
assert(s == s2)
except AssertionError:
logging.info(s)
logging.info(s2)
raise
def test_class_expression1():
s = '''
CLASS
TEXT ([area])
END
'''
exp = "CLASS TEXT ([area]) END"
assert(output(s) == exp)
def test_class_expression2():
s = '''
CLASS
TEXT ("[area]")
END
'''
exp = 'CLASS TEXT ("[area]") END'
assert(output(s) == exp)
def test_complex_class_expression():
s = '''
CLASS
TEXT ("Area is: " + tostring([area],"%.2f"))
END
'''
exp = '''CLASS TEXT ("Area is: " + (tostring([area],"%.2f"))) END'''
assert(output(s) == exp)
def test_or_expressions():
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" OR "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) OR ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" || "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) OR ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
def test_and_expressions():
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" AND "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) AND ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
s = '''
CLASS
EXPRESSION ("[style_class]" = "10" && "[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION ( ( "[style_class]" = "10" ) AND ( "[style_class]" = "20" ) ) END'
assert(output(s) == exp)
def test_not_expressions():
s = '''
CLASS
EXPRESSION NOT("[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION NOT ( "[style_class]" = "20" ) END'
assert(output(s) == exp)
s = '''
CLASS
EXPRESSION !("[style_class]" = "20")
END
'''
exp = 'CLASS EXPRESSION NOT ( "[style_class]" = "20" ) END'
assert(output(s) == exp)
def test_runtime_expression():
s = """
CLASS
EXPRESSION ( [EPPL_Q100_] = %eppl% )
END
"""
exp = "CLASS EXPRESSION ( [EPPL_Q100_] = %eppl% ) END"
# print(output(s))
assert(output(s) == exp)
def test_ne_comparison():
s = """
CLASS
# EXPRESSION ( "[building]" IS NOT NULL) # incorrect syntax
EXPRESSION ( "[building]" NE NULL)
END
"""
exp = 'CLASS EXPRESSION ( "[building]" NE NULL ) END'
assert(output(s) == exp)
def test_eq_comparison():
s = """
CLASS
EXPRESSION ( "[building]" eq NULL)
END
"""
exp = 'CLASS EXPRESSION ( "[building]" eq NULL ) END'
# print(output(s))
assert(output(s) == exp)
def test_expression():
s = """
CLASS
EXPRESSION ('[construct]' ~* /Br.*$/)
STYLE
ANGLE 360
END
END
"""
exp = "CLASS EXPRESSION ( '[construct]' ~* /Br.*$/ ) STYLE ANGLE 360 END END"
assert(output(s) == exp)
def test_list_expression():
s = """
CLASS
EXPRESSION /NS_Bahn|NS_BahnAuto/
END
"""
exp = "CLASS EXPRESSION /NS_Bahn|NS_BahnAuto/ END"
assert(output(s) == exp)
def test_numerical_operator_ge_expression():
s = """
CLASS
EXPRESSION ([power] ge 10000)
END
"""
exp = "CLASS EXPRESSION ( [power] ge 10000 ) END"
assert(output(s) == exp)
def test_numerical_operator_gt_expression():
s = """
CLASS
EXPRESSION ([power] gt 10000)
END
"""
exp = "CLASS EXPRESSION ( [power] gt 10000 ) END"
assert(output(s) == exp)
def test_numerical_operator_le_expression():
s = """
CLASS
EXPRESSION ([power] le 100)
END
"""
exp = "CLASS EXPRESSION ( [power] le 100 ) END"
assert(output(s) == exp)
def test_numerical_operator_lt_expression():
s = """
CLASS
EXPRESSION ([power] lt 100)
END
"""
exp = "CLASS EXPRESSION ( [power] lt 100 ) END"
assert(output(s) == exp)
def test_divide():
s = """
CLASS
EXPRESSION ([field1] / [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] / [field2]) END"
assert(output(s) == exp)
def test_multiply():
s = """
CLASS
EXPRESSION ([field1] * [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] * [field2]) END"
assert(output(s) == exp)
def test_negation():
s = """
CLASS
EXPRESSION (-[field1])
END
"""
exp = "CLASS EXPRESSION (-[field1]) END"
assert(output(s) == exp)
def test_pointless_plus():
# Based on test_negation
s = """
CLASS
EXPRESSION (+[field1])
END
"""
exp = "CLASS EXPRESSION ([field1]) END"
assert(output(s) == exp)
def test_power():
s = """
CLASS
EXPRESSION ([field1] ^ [field2])
END
"""
exp = "CLASS EXPRESSION ([field1] ^ [field2]) END"
assert(output(s) == exp)
def test_divide_expression():
s = """
CLASS
EXPRESSION ([field1] / [field2] > 0.1)
END
"""
exp = "CLASS EXPRESSION ( [field1] / [field2] > 0.1 ) END"
assert(output(s) == exp)
def test_modulo_expression():
s = """
CLASS
EXPRESSION ( ([height] % 50) = 0 )
END
"""
exp = "CLASS EXPRESSION ( ( [height] % 50 ) = 0 ) END"
assert(output(s) == exp)
def test_escaped_string():
s = r"""
CLASS
EXPRESSION "National \"hero\" statue"
END
"""
exp = """CLASS EXPRESSION 'National \\"hero\\" statue' END"""
assert(output(s) == exp)
def test_list_expression_alt():
s = """
CLASS
EXPRESSION {2_Klass,Rte2etr}
END
"""
exp = "CLASS EXPRESSION {2_Klass,Rte2etr} END"
assert(output(s) == exp)
s = """
CLASS
EXPRESSION {2_Klass,class with space}
END
"""
exp = "CLASS EXPRESSION {2_Klass,class with space} END"
assert(output(s) == exp)
def test_class_expression_oddname():
s = '''
CLASS
TEXT ([area:ian])
END
'''
exp = "CLASS TEXT ([area:ian]) END"
assert(output(s) == exp)
def test_class_not_expression_brackets():
s = '''
CLASS
EXPRESSION (("[TIME]" eq 'NOW') AND NOT ("[TYPE]" ~ "(something|completely|different)"))
END
'''
exp = '''CLASS EXPRESSION ( ( "[TIME]" eq 'NOW' ) AND NOT ( "[TYPE]" ~ "(something|completely|different)" ) ) END'''
print(output(s))
assert(output(s) == exp)
def test_class_not_expression_no_brackets():
s = '''
CLASS
EXPRESSION ("[TIME]" eq 'NOW' AND NOT "[TYPE]" ~ "(something|completely|different)")
END
'''
exp = '''CLASS EXPRESSION ( ( "[TIME]" eq 'NOW' ) AND NOT ( "[TYPE]" ~ "(something|completely|different)" ) ) END'''
assert(output(s) == exp)
def test_unquoted_unicode_string():
s = '''
CLASS
EXPRESSION {Aérodrome,Aéroport,Héliport,Base spatiale}
END
'''
exp = u'''CLASS EXPRESSION {Aérodrome,Aéroport,Héliport,Base spatiale} END'''
assert(output(s) == exp)
def test_list_with_apostrophe():
s = '''
CLASS
EXPRESSION {bla,d'apostrophe}
END
'''
exp = u'''CLASS EXPRESSION {bla,d'apostrophe} END'''
assert(output(s) == exp)
def run_tests():
pytest.main(["tests/test_expressions.py"])
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
test_list_with_apostrophe()
# run_tests()
print("Done!")
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.